author    Mark Brown <broonie@kernel.org>    2018-05-22 10:28:38 +0100
committer Mark Brown <broonie@kernel.org>    2018-05-22 10:28:38 +0100
commit    e1c51aa8705fd78e5259e0515aad3b5003e18550 (patch)
tree      3728628142a24460d6b488e5331f421f902e5d3a
parent    6e49b7fa3083c30688b7fff1b37ed891cd1c30af (diff)
parent    08556e03ad36b2c6219aba90de88ccf58038e208 (diff)
Merge branch 'linux-linaro-lsk-v4.9' into linux-linaro-lsk-v4.9-rt (linux-linaro-lsk-v4.9-rt-test)
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio2
-rw-r--r--Documentation/arm64/silicon-errata.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt11
-rw-r--r--Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt (renamed from Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt)4
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt11
-rw-r--r--Documentation/devicetree/bindings/mfd/axp20x.txt3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt9
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--Documentation/virtual/kvm/api.txt9
-rw-r--r--Documentation/virtual/kvm/arm/psci.txt30
-rw-r--r--Makefile11
-rw-r--r--arch/alpha/include/asm/futex.h26
-rw-r--r--arch/alpha/kernel/console.c1
-rw-r--r--arch/arc/include/asm/futex.h40
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi1
-rw-r--r--arch/arm/boot/dts/am57xx-idk-common.dtsi1
-rw-r--r--arch/arm/boot/dts/aspeed-ast2500-evb.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9g25.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi2
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts2
-rw-r--r--arch/arm/boot/dts/exynos4412-trats2.dts2
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi8
-rw-r--r--arch/arm/boot/dts/imx53-qsrb.dts2
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv.dtsi9
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-som.dtsi8
-rw-r--r--arch/arm/boot/dts/ls1021a-qds.dts2
-rw-r--r--arch/arm/boot/dts/ls1021a-twr.dts2
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi2
-rw-r--r--arch/arm/boot/dts/moxart-uc7112lx.dts2
-rw-r--r--arch/arm/boot/dts/moxart.dtsi17
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019.dtsi4
-rw-r--r--arch/arm/boot/dts/r7s72100.dtsi2
-rw-r--r--arch/arm/boot/dts/r8a7740-armadillo800eva.dts2
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi7
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts2
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi10
-rw-r--r--arch/arm/boot/dts/r8a7792.dtsi3
-rw-r--r--arch/arm/boot/dts/r8a7793.dtsi10
-rw-r--r--arch/arm/boot/dts/r8a7794-silk.dts2
-rw-r--r--arch/arm/boot/dts/r8a7794.dtsi13
-rw-r--r--arch/arm/boot/dts/rk322x.dtsi6
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi2
-rw-r--r--arch/arm/configs/bcm2835_defconfig4
-rw-r--r--arch/arm/configs/lsk_defconfig932
-rw-r--r--arch/arm/include/asm/futex.h26
-rw-r--r--arch/arm/include/asm/kvm_host.h9
-rw-r--r--arch/arm/include/asm/kvm_mmu.h10
-rw-r--r--arch/arm/include/asm/kvm_psci.h27
-rw-r--r--arch/arm/include/asm/xen/events.h2
-rw-r--r--arch/arm/include/uapi/asm/kvm.h6
-rw-r--r--arch/arm/kernel/ftrace.c11
-rw-r--r--arch/arm/kvm/arm.c11
-rw-r--r--arch/arm/kvm/guest.c13
-rw-r--r--arch/arm/kvm/handle_exit.c4
-rw-r--r--arch/arm/kvm/hyp/Makefile5
-rw-r--r--arch/arm/kvm/hyp/banked-sr.c4
-rw-r--r--arch/arm/kvm/hyp/switch.c2
-rw-r--r--arch/arm/kvm/mmu.c2
-rw-r--r--arch/arm/kvm/psci.c203
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S4
-rw-r--r--arch/arm/mach-bcm/Kconfig1
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c10
-rw-r--r--arch/arm/mach-imx/cpu.c3
-rw-r--r--arch/arm/mach-imx/mxc.h6
-rw-r--r--arch/arm/mach-mvebu/Kconfig4
-rw-r--r--arch/arm/mach-omap2/clockdomains7xx_data.c2
-rw-r--r--arch/arm/mach-omap2/omap-secure.c2
-rw-r--r--arch/arm/vfp/vfpmodule.c2
-rw-r--r--arch/arm64/Kconfig43
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796.dtsi3
-rw-r--r--arch/arm64/crypto/sha256-core.S2061
-rw-r--r--arch/arm64/crypto/sha512-core.S1085
-rw-r--r--arch/arm64/include/asm/assembler.h62
-rw-r--r--arch/arm64/include/asm/barrier.h23
-rw-r--r--arch/arm64/include/asm/cpucaps.h4
-rw-r--r--arch/arm64/include/asm/cputype.h14
-rw-r--r--arch/arm64/include/asm/fixmap.h6
-rw-r--r--arch/arm64/include/asm/futex.h32
-rw-r--r--arch/arm64/include/asm/kvm_host.h8
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h38
-rw-r--r--arch/arm64/include/asm/kvm_psci.h27
-rw-r--r--arch/arm64/include/asm/memory.h21
-rw-r--r--arch/arm64/include/asm/mmu.h50
-rw-r--r--arch/arm64/include/asm/mmu_context.h7
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h1
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h35
-rw-r--r--arch/arm64/include/asm/pgtable.h1
-rw-r--r--arch/arm64/include/asm/proc-fns.h6
-rw-r--r--arch/arm64/include/asm/processor.h24
-rw-r--r--arch/arm64/include/asm/sysreg.h3
-rw-r--r--arch/arm64/include/asm/tlbflush.h16
-rw-r--r--arch/arm64/include/asm/uaccess.h153
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h6
-rw-r--r--arch/arm64/kernel/Makefile4
-rw-r--r--arch/arm64/kernel/arm64ksyms.c4
-rw-r--r--arch/arm64/kernel/asm-offsets.c6
-rw-r--r--arch/arm64/kernel/bpi.S75
-rw-r--r--arch/arm64/kernel/cpu-reset.S2
-rw-r--r--arch/arm64/kernel/cpu_errata.c189
-rw-r--r--arch/arm64/kernel/cpufeature.c139
-rw-r--r--arch/arm64/kernel/entry.S211
-rw-r--r--arch/arm64/kernel/head.S2
-rw-r--r--arch/arm64/kernel/pci.c4
-rw-r--r--arch/arm64/kernel/perf_event.c23
-rw-r--r--arch/arm64/kernel/process.c12
-rw-r--r--arch/arm64/kernel/sleep.S2
-rw-r--r--arch/arm64/kernel/traps.c2
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S22
-rw-r--r--arch/arm64/kvm/guest.c14
-rw-r--r--arch/arm64/kvm/handle_exit.c16
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S20
-rw-r--r--arch/arm64/kvm/hyp/switch.c6
-rw-r--r--arch/arm64/lib/clear_user.S6
-rw-r--r--arch/arm64/lib/copy_in_user.S4
-rw-r--r--arch/arm64/mm/context.c37
-rw-r--r--arch/arm64/mm/fault.c34
-rw-r--r--arch/arm64/mm/mmap.c19
-rw-r--r--arch/arm64/mm/mmu.c41
-rw-r--r--arch/arm64/mm/proc.S224
-rw-r--r--arch/arm64/net/bpf_jit_comp.c5
-rw-r--r--arch/frv/include/asm/futex.h3
-rw-r--r--arch/frv/include/asm/timex.h6
-rw-r--r--arch/frv/kernel/futex.c27
-rw-r--r--arch/hexagon/include/asm/futex.h38
-rw-r--r--arch/ia64/include/asm/futex.h25
-rw-r--r--arch/ia64/kernel/module.c4
-rw-r--r--arch/microblaze/include/asm/futex.h38
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/ath25/board.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2
-rw-r--r--arch/mips/include/asm/futex.h25
-rw-r--r--arch/mips/include/asm/kprobes.h3
-rw-r--r--arch/mips/include/asm/pgtable-32.h7
-rw-r--r--arch/mips/include/asm/uaccess.h11
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c16
-rw-r--r--arch/mips/kernel/smp-bmips.c8
-rw-r--r--arch/mips/lib/Makefile3
-rw-r--r--arch/mips/lib/libgcc.h17
-rw-r--r--arch/mips/lib/memset.S11
-rw-r--r--arch/mips/lib/multi3.c54
-rw-r--r--arch/mips/mm/pgtable-32.c6
-rw-r--r--arch/mips/net/bpf_jit.c16
-rw-r--r--arch/mips/net/bpf_jit_asm.S23
-rw-r--r--arch/mips/ralink/reset.c7
-rw-r--r--arch/parisc/include/asm/cacheflush.h1
-rw-r--r--arch/parisc/include/asm/futex.h26
-rw-r--r--arch/parisc/kernel/cache.c88
-rw-r--r--arch/parisc/kernel/drivers.c4
-rw-r--r--arch/parisc/kernel/pacache.S22
-rw-r--r--arch/powerpc/include/asm/barrier.h3
-rw-r--r--arch/powerpc/include/asm/code-patching.h1
-rw-r--r--arch/powerpc/include/asm/cputable.h3
-rw-r--r--arch/powerpc/include/asm/futex.h26
-rw-r--r--arch/powerpc/include/asm/module.h4
-rw-r--r--arch/powerpc/include/asm/opal.h3
-rw-r--r--arch/powerpc/include/asm/page.h12
-rw-r--r--arch/powerpc/include/asm/synch.h4
-rw-r--r--arch/powerpc/kernel/eeh_driver.c61
-rw-r--r--arch/powerpc/kernel/eeh_pe.c3
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kernel/irq.c8
-rw-r--r--arch/powerpc/kernel/module_64.c12
-rw-r--r--arch/powerpc/kernel/time.c14
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c5
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S8
-rw-r--r--arch/powerpc/kvm/book3s_pr.c6
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c34
-rw-r--r--arch/powerpc/lib/code-patching.c5
-rw-r--r--arch/powerpc/lib/feature-fixups.c2
-rw-r--r--arch/powerpc/mm/fault.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c18
-rw-r--r--arch/powerpc/mm/tlb_nohash.c2
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-nvram.c11
-rw-r--r--arch/powerpc/platforms/powernv/opal-rtc.c8
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c2
-rw-r--r--arch/s390/Kconfig47
-rw-r--r--arch/s390/Makefile10
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/alternative.h149
-rw-r--r--arch/s390/include/asm/barrier.h24
-rw-r--r--arch/s390/include/asm/facility.h20
-rw-r--r--arch/s390/include/asm/futex.h23
-rw-r--r--arch/s390/include/asm/kvm_host.h3
-rw-r--r--arch/s390/include/asm/lowcore.h7
-rw-r--r--arch/s390/include/asm/nospec-branch.h17
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/thread_info.h4
-rw-r--r--arch/s390/include/uapi/asm/kvm.h5
-rw-r--r--arch/s390/kernel/Makefile6
-rw-r--r--arch/s390/kernel/alternative.c112
-rw-r--r--arch/s390/kernel/early.c7
-rw-r--r--arch/s390/kernel/entry.S250
-rw-r--r--arch/s390/kernel/ipl.c2
-rw-r--r--arch/s390/kernel/module.c65
-rw-r--r--arch/s390/kernel/nospec-branch.c169
-rw-r--r--arch/s390/kernel/processor.c18
-rw-r--r--arch/s390/kernel/setup.c14
-rw-r--r--arch/s390/kernel/smp.c7
-rw-r--r--arch/s390/kernel/uprobes.c9
-rw-r--r--arch/s390/kernel/vmlinux.lds.S45
-rw-r--r--arch/s390/kvm/kvm-s390.c14
-rw-r--r--arch/s390/kvm/vsie.c30
-rw-r--r--arch/sh/boards/mach-se/770x/setup.c10
-rw-r--r--arch/sh/include/asm/futex.h26
-rw-r--r--arch/sparc/include/asm/futex_64.h26
-rw-r--r--arch/sparc/kernel/ldc.c7
-rw-r--r--arch/tile/include/asm/futex.h40
-rw-r--r--arch/um/os-Linux/file.c1
-rw-r--r--arch/um/os-Linux/signal.c3
-rw-r--r--arch/x86/Makefile14
-rw-r--r--arch/x86/boot/compressed/error.h4
-rw-r--r--arch/x86/boot/compressed/kaslr.c11
-rw-r--r--arch/x86/boot/compressed/misc.c4
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c3
-rw-r--r--arch/x86/entry/entry_32.S3
-rw-r--r--arch/x86/entry/entry_64.S18
-rw-r--r--arch/x86/events/core.c8
-rw-r--r--arch/x86/events/intel/core.c2
-rw-r--r--arch/x86/events/intel/cstate.c2
-rw-r--r--arch/x86/events/intel/uncore_snbep.c33
-rw-r--r--arch/x86/events/msr.c9
-rw-r--r--arch/x86/include/asm/apm.h6
-rw-r--r--arch/x86/include/asm/asm-prototypes.h3
-rw-r--r--arch/x86/include/asm/asm.h3
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/efi.h17
-rw-r--r--arch/x86/include/asm/futex.h40
-rw-r--r--arch/x86/include/asm/mmu.h15
-rw-r--r--arch/x86/include/asm/mmu_context.h5
-rw-r--r--arch/x86/include/asm/nospec-branch.h180
-rw-r--r--arch/x86/include/asm/paravirt.h16
-rw-r--r--arch/x86/include/asm/paravirt_types.h5
-rw-r--r--arch/x86/include/asm/reboot.h1
-rw-r--r--arch/x86/include/asm/tlbflush.h2
-rw-r--r--arch/x86/include/asm/vmx.h1
-rw-r--r--arch/x86/include/uapi/asm/msgbuf.h31
-rw-r--r--arch/x86/include/uapi/asm/shmbuf.h42
-rw-r--r--arch/x86/kernel/apic/vector.c14
-rw-r--r--arch/x86/kernel/cpu/bugs.c12
-rw-r--r--arch/x86/kernel/cpu/intel.c10
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c70
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c2
-rw-r--r--arch/x86/kernel/head_64.S2
-rw-r--r--arch/x86/kernel/i8259.c1
-rw-r--r--arch/x86/kernel/kprobes/core.c15
-rw-r--r--arch/x86/kernel/kprobes/opt.c3
-rw-r--r--arch/x86/kernel/machine_kexec_64.c1
-rw-r--r--arch/x86/kernel/module.c14
-rw-r--r--arch/x86/kernel/reboot.c5
-rw-r--r--arch/x86/kernel/setup_percpu.c21
-rw-r--r--arch/x86/kernel/smp.c3
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/sys_x86_64.c5
-rw-r--r--arch/x86/kernel/traps.c24
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kernel/vm86_32.c3
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/svm.c40
-rw-r--r--arch/x86/kvm/vmx.c28
-rw-r--r--arch/x86/kvm/x86.c3
-rw-r--r--arch/x86/lib/Makefile1
-rw-r--r--arch/x86/lib/csum-copy_64.S12
-rw-r--r--arch/x86/lib/kaslr.c3
-rw-r--r--arch/x86/lib/retpoline.S56
-rw-r--r--arch/x86/mm/fault.c6
-rw-r--r--arch/x86/mm/pgtable.c48
-rw-r--r--arch/x86/mm/tlb.c33
-rw-r--r--arch/x86/net/bpf_jit_comp.c12
-rw-r--r--arch/x86/oprofile/nmi_int.c2
-rw-r--r--arch/x86/platform/efi/efi.c6
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c2
-rw-r--r--arch/x86/tools/relocs.c3
-rw-r--r--arch/x86/um/stub_segv.c3
-rw-r--r--arch/x86/xen/smp.c49
-rw-r--r--arch/x86/xen/suspend.c16
-rw-r--r--arch/xtensa/include/asm/futex.h27
-rw-r--r--arch/xtensa/mm/init.c70
-rw-r--r--block/bio-integrity.c3
-rw-r--r--block/bio.c4
-rw-r--r--block/blk-cgroup.c4
-rw-r--r--block/blk-mq.c11
-rw-r--r--block/blk-throttle.c11
-rw-r--r--block/partition-generic.c4
-rw-r--r--block/partitions/msdos.c4
-rw-r--r--crypto/af_alg.c8
-rw-r--r--crypto/ahash.c7
-rw-r--r--crypto/asymmetric_keys/pkcs7_verify.c2
-rw-r--r--crypto/asymmetric_keys/public_key.c4
-rw-r--r--crypto/asymmetric_keys/restrict.c7
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c1
-rw-r--r--crypto/async_tx/async_pq.c5
-rw-r--r--crypto/drbg.c2
-rw-r--r--drivers/acpi/acpi_video.c37
-rw-r--r--drivers/acpi/acpi_watchdog.c4
-rw-r--r--drivers/acpi/acpica/evxfevnt.c18
-rw-r--r--drivers/acpi/acpica/psobject.c14
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/nfit/core.c27
-rw-r--r--drivers/acpi/numa.c10
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c50
-rw-r--r--drivers/acpi/power.c10
-rw-r--r--drivers/acpi/processor_driver.c10
-rw-r--r--drivers/acpi/processor_throttling.c62
-rw-r--r--drivers/acpi/sleep.c1
-rw-r--r--drivers/acpi/sleep.h1
-rw-r--r--drivers/acpi/video_detect.c9
-rw-r--r--drivers/amba/bus.c17
-rw-r--r--drivers/android/binder.c4
-rw-r--r--drivers/ata/ahci.c4
-rw-r--r--drivers/ata/libahci_platform.c5
-rw-r--r--drivers/ata/libata-core.c29
-rw-r--r--drivers/ata/libata-scsi.c10
-rw-r--r--drivers/atm/zatm.c3
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/block/loop.c17
-rw-r--r--drivers/bluetooth/btqcomsmd.c3
-rw-r--r--drivers/bluetooth/hci_ldisc.c11
-rw-r--r--drivers/bluetooth/hci_qca.c3
-rw-r--r--drivers/bus/brcmstb_gisb.c42
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/agp/intel-gtt.c2
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c2
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c8
-rw-r--r--drivers/char/random.c124
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c4
-rw-r--r--drivers/char/tpm/tpm-dev.c6
-rw-r--r--drivers/char/tpm/tpm-interface.c5
-rw-r--r--drivers/char/tpm/tpm2-cmd.c6
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c5
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c8
-rw-r--r--drivers/char/tpm/tpm_tis.c2
-rw-r--r--drivers/char/tpm/tpm_tis_core.c9
-rw-r--r--drivers/char/tpm/tpm_tis_core.h4
-rw-r--r--drivers/char/tpm/tpm_tis_spi.c48
-rw-r--r--drivers/char/virtio_console.c49
-rw-r--r--drivers/clk/at91/clk-generated.c4
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c20
-rw-r--r--drivers/clk/bcm/clk-ns2.c2
-rw-r--r--drivers/clk/clk-axi-clkgen.c29
-rw-r--r--drivers/clk/clk-conf.c2
-rw-r--r--drivers/clk/clk-scpi.c6
-rw-r--r--drivers/clk/clk-si5351.c2
-rw-r--r--drivers/clk/clk.c32
-rw-r--r--drivers/clk/meson/Kconfig6
-rw-r--r--drivers/clk/meson/gxbb.c4
-rw-r--r--drivers/clk/meson/meson8b.c5
-rw-r--r--drivers/clk/mvebu/armada-38x.c15
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c2
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c23
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c6
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c14
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c8
-rw-r--r--drivers/cpufreq/sh-cpufreq.c45
-rw-r--r--drivers/cpuidle/dt_idle_states.c4
-rw-r--r--drivers/crypto/omap-sham.c31
-rw-r--r--drivers/crypto/talitos.c41
-rw-r--r--drivers/dax/dax.c12
-rw-r--r--drivers/devfreq/devfreq.c3
-rw-r--r--drivers/dma/at_xdmac.c4
-rw-r--r--drivers/dma/fsl-edma.c28
-rw-r--r--drivers/dma/imx-sdma.c40
-rw-r--r--drivers/dma/ti-dma-crossbar.c10
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c3
-rw-r--r--drivers/edac/altera_edac.c22
-rw-r--r--drivers/edac/mv64x60_edac.c2
-rw-r--r--drivers/firmware/psci.c57
-rw-r--r--drivers/gpio/gpio-aspeed.c2
-rw-r--r--drivers/gpio/gpio-crystalcove.c54
-rw-r--r--drivers/gpio/gpio-wcove.c8
-rw-r--r--drivers/gpio/gpiolib.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c10
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c4
-rw-r--r--drivers/gpu/drm/drm_dp_dual_mode_helper.c39
-rw-r--r--drivers/gpu/drm/drm_edid.c12
-rw-r--r--drivers/gpu/drm/drm_irq.c14
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c20
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c14
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c7
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c9
-rw-r--r--drivers/gpu/drm/radeon/cik.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c105
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c7
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c48
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c28
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c22
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c5
-rw-r--r--drivers/hid/hid-core.c13
-rw-r--r--drivers/hid/hid-elo.c6
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c23
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-rmi.c4
-rw-r--r--drivers/hid/hid-sony.c45
-rw-r--r--drivers/hid/hidraw.c5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c26
-rw-r--r--drivers/hid/wacom_sys.c2
-rw-r--r--drivers/hsi/clients/ssi_protocol.c5
-rw-r--r--drivers/hv/channel_mgmt.c2
-rw-r--r--drivers/hwmon/ina2xx.c90
-rw-r--r--drivers/hwmon/pmbus/adm1275.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c13
-rw-r--r--drivers/hwtracing/coresight/coresight.c15
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c29
-rw-r--r--drivers/i2c/busses/i2c-scmi.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c17
-rw-r--r--drivers/iio/accel/st_accel_core.c9
-rw-r--r--drivers/iio/adc/hi8435.c27
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c4
-rw-r--r--drivers/iio/imu/adis_trigger.c7
-rw-r--r--drivers/iio/industrialio-buffer.c2
-rw-r--r--drivers/iio/light/rpr0521.c17
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c10
-rw-r--r--drivers/iio/pressure/zpa2326.c18
-rw-r--r--drivers/infiniband/core/addr.c16
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/device.c3
-rw-r--r--drivers/infiniband/core/iwpm_util.c1
-rw-r--r--drivers/infiniband/core/ucma.c76
-rw-r--r--drivers/infiniband/core/umem.c4
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c13
-rw-r--r--drivers/infiniband/core/uverbs_main.c27
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h4
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c26
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c93
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h7
-rw-r--r--drivers/infiniband/hw/hfi1/init.c4
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c3
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c6
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c6
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c26
-rw-r--r--drivers/infiniband/hw/mlx5/main.c5
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c3
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c49
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c15
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c41
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c16
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c7
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h1
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c18
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c9
-rw-r--r--drivers/input/input-leds.c8
-rw-r--r--drivers/input/keyboard/matrix_keypad.c4
-rw-r--r--drivers/input/keyboard/qt1070.c9
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c2
-rw-r--r--drivers/input/misc/drv260x.c2
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c2
-rw-r--r--drivers/input/mouse/alps.c24
-rw-r--r--drivers/input/mouse/elan_i2c_core.c7
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c9
-rw-r--r--drivers/input/mouse/elantech.c11
-rw-r--r--drivers/input/mousedev.c62
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h24
-rw-r--r--drivers/input/touchscreen/ar1021_i2c.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c9
-rw-r--r--drivers/input/touchscreen/goodix.c8
-rw-r--r--drivers/input/touchscreen/tsc2007.c8
-rw-r--r--drivers/iommu/intel-svm.c10
-rw-r--r--drivers/iommu/iova.c2
-rw-r--r--drivers/iommu/omap-iommu.c21
-rw-r--r--drivers/irqchip/irq-gic-common.c9
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c9
-rw-r--r--drivers/irqchip/irq-gic-v3.c13
-rw-r--r--drivers/irqchip/irq-mbigen.c5
-rw-r--r--drivers/irqchip/irq-mips-gic.c12
-rw-r--r--drivers/isdn/mISDN/stack.c2
-rw-r--r--drivers/leds/led-core.c1
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/leds/leds-pm8058.c2
-rw-r--r--drivers/md/bcache/alloc.c19
-rw-r--r--drivers/md/bcache/super.c33
-rw-r--r--drivers/md/dm-io.c1
-rw-r--r--drivers/md/dm-ioctl.c4
-rw-r--r--drivers/md/md-cluster.c4
-rw-r--r--drivers/md/md.c8
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5.c30
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c23
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c7
-rw-r--r--drivers/media/dvb-frontends/si2168.c3
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c36
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c2
-rw-r--r--drivers/media/i2c/tc358743.c2
-rw-r--r--drivers/media/pci/bt8xx/bt878.c3
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c11
-rw-r--r--drivers/media/platform/pxa_camera.c14
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c16
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c13
-rw-r--r--drivers/media/rc/mceusb.c9
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-sensor.c6
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c11
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.h2
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c4
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mfd/palmas.c14
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/cxl/flash.c8
-rw-r--r--drivers/misc/enclosure.c7
-rw-r--r--drivers/misc/mei/main.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mmc/core/core.c8
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c4
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c14
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c2
-rw-r--r--drivers/mmc/host/sdhci.c7
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c33
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c9
-rw-r--r--drivers/mtd/chips/jedec_probe.c2
-rw-r--r--drivers/mtd/mtdchar.c4
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c39
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c16
-rw-r--r--drivers/mtd/nand/nand_base.c14
-rw-r--r--drivers/mtd/tests/oobtest.c21
-rw-r--r--drivers/mtd/ubi/block.c2
-rw-r--r--drivers/mtd/ubi/build.c11
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c1
-rw-r--r--drivers/mtd/ubi/fastmap.c33
-rw-r--r--drivers/mtd/ubi/vmt.c15
-rw-r--r--drivers/net/bonding/bond_alb.c15
-rw-r--r--drivers/net/bonding/bond_main.c127
-rw-r--r--drivers/net/can/cc770/cc770.c100
-rw-r--r--drivers/net/can/cc770/cc770.h2
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c75
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c35
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c17
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c81
-rw-r--r--drivers/net/ethernet/arc/emac_main.c53
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c33
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c22
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c42
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c23
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c32
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c9
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c12
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c26
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c18
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c23
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c27
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c27
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c10
-rw-r--r--drivers/net/ethernet/realtek/r8169.c7
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h3
-rw-r--r--drivers/net/ethernet/sun/niu.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c23
-rw-r--r--drivers/net/geneve.c36
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hyperv/netvsc.c16
-rw-r--r--drivers/net/ieee802154/adf7242.c4
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c4
-rw-r--r--drivers/net/macsec.c13
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/phy/mdio-mux.c11
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/mdio-xgene.c73
-rw-r--r--drivers/net/phy/mdio-xgene.h4
-rw-r--r--drivers/net/phy/phy.c8
-rw-r--r--drivers/net/ppp/ppp_generic.c35
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/ppp/pptp.c1
-rw-r--r--drivers/net/slip/slhc.c5
-rw-r--r--drivers/net/team/team.c54
-rw-r--r--drivers/net/usb/cdc_ether.c16
-rw-r--r--drivers/net/usb/cdc_ncm.c11
-rw-r--r--drivers/net/usb/lan78xx.c3
-rw-r--r--drivers/net/usb/qmi_wwan.c19
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c16
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c5
-rw-r--r--drivers/net/vrf.c8
-rw-r--r--drivers/net/vxlan.c17
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c18
-rw-r--r--drivers/net/wan/hdlc_ppp.c5
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c25
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c4
-rw-r--r--drivers/net/wireless/ath/regd.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c20
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-a000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c6
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c35
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mcu.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c22
-rw-r--r--drivers/net/wireless/ray_cs.c7
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c3
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c3
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c3
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c2
-rw-r--r--drivers/nfc/nfcmrvl/spi.c5
-rw-r--r--drivers/nfc/pn533/i2c.c4
-rw-r--r--drivers/nvdimm/blk.c3
-rw-r--r--drivers/nvdimm/btt.c3
-rw-r--r--drivers/nvdimm/bus.c11
-rw-r--r--drivers/nvdimm/namespace_devs.c4
-rw-r--r--drivers/nvdimm/pfn_devs.c15
-rw-r--r--drivers/nvme/host/core.c7
-rw-r--r--drivers/nvme/host/pci.c13
-rw-r--r--drivers/of/base.c2
-rw-r--r--drivers/of/device.c2
-rw-r--r--drivers/of/fdt.c7
-rw-r--r--drivers/parport/parport_pc.c4
-rw-r--r--drivers/pci/host/pci-aardvark.c12
-rw-r--r--drivers/pci/host/pci-hyperv.c24
-rw-r--r--drivers/pci/host/pcie-designware.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c23
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pci.c52
-rw-r--r--drivers/pci/pcie/aspm.c8
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/pci/quirks.c44
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/perf/arm_pmu.c12
-rw-r--r--drivers/pinctrl/core.c24
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c23
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c1
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c8
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c8
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c2
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c22
-rw-r--r--drivers/platform/x86/asus-wireless.c4
-rw-r--r--drivers/platform/x86/asus-wmi.c12
-rw-r--r--drivers/platform/x86/asus-wmi.h1
-rw-r--r--drivers/platform/x86/intel-vbtn.c4
-rw-r--r--drivers/power/supply/ab8500_charger.c6
-rw-r--r--drivers/power/supply/bq2415x_charger.c5
-rw-r--r--drivers/power/supply/bq24190_charger.c10
-rw-r--r--drivers/power/supply/isp1704_charger.c4
-rw-r--r--drivers/power/supply/pda_power.c49
-rw-r--r--drivers/powercap/powercap_sys.c1
-rw-r--r--drivers/ptp/ptp_clock.c18
-rw-r--r--drivers/pwm/pwm-rcar.c8
-rw-r--r--drivers/pwm/pwm-stmpe.c2
-rw-r--r--drivers/pwm/pwm-tegra.c7
-rw-r--r--drivers/regulator/anatop-regulator.c5
-rw-r--r--drivers/regulator/core.c9
-rw-r--r--drivers/rtc/interface.c9
-rw-r--r--drivers/rtc/rtc-cmos.c17
-rw-r--r--drivers/rtc/rtc-ds1374.c10
-rw-r--r--drivers/rtc/rtc-m41t80.c12
-rw-r--r--drivers/rtc/rtc-opal.c47
-rw-r--r--drivers/rtc/rtc-snvs.c2
-rw-r--r--drivers/s390/block/dasd.c8
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/block/dasd_alias.c13
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/cio/chsc.c14
-rw-r--r--drivers/s390/cio/qdio_main.c42
-rw-r--r--drivers/s390/net/qeth_core.h7
-rw-r--r--drivers/s390/net/qeth_core_main.c64
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3.h34
-rw-r--r--drivers/s390/net/qeth_l3_main.c121
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c10
-rw-r--r--drivers/scsi/csiostor/csio_hw.c5
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c16
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h2
-rw-r--r--drivers/scsi/ipr.c16
-rw-r--r--drivers/scsi/libiscsi.c24
-rw-r--r--drivers/scsi/libsas/sas_expander.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c3
-rw-r--r--drivers/scsi/mac_esp.c33
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c9
-rw-r--r--drivers/scsi/scsi_dh.c5
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/ses.c12
-rw-r--r--drivers/scsi/sg.c40
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/scsi/virtio_scsi.c25
-rw-r--r--drivers/soc/fsl/qbman/qman.c28
-rw-r--r--drivers/soc/fsl/qe/qe.c13
-rw-r--r--drivers/spi/Kconfig1
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-dw-mmio.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c9
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi.c10
-rw-r--r--drivers/staging/android/ashmem.c23
-rw-r--r--drivers/staging/comedi/drivers.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c2
-rw-r--r--drivers/staging/speakup/kobjects.c8
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c8
-rw-r--r--drivers/staging/wilc1000/host_interface.c2
-rw-r--r--drivers/staging/wilc1000/linux_mon.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/target/target_core_file.c23
-rw-r--r--drivers/tee/optee/core.c1
-rw-r--r--drivers/thermal/imx_thermal.c6
-rw-r--r--drivers/thermal/power_allocator.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c14
-rw-r--r--drivers/thunderbolt/nhi.c1
-rw-r--r--drivers/tty/n_gsm.c40
-rw-r--r--drivers/tty/n_tty.c6
-rw-r--r--drivers/tty/serial/8250/8250_dw.c3
-rw-r--r--drivers/tty/serial/8250/8250_omap.c4
-rw-r--r--drivers/tty/serial/8250/8250_pci.c11
-rw-r--r--drivers/tty/serial/amba-pl011.c23
-rw-r--r--drivers/tty/serial/atmel_serial.c1
-rw-r--r--drivers/tty/serial/earlycon.c9
-rw-r--r--drivers/tty/serial/imx.c46
-rw-r--r--drivers/tty/serial/sccnxp.c26
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/serial/sh-sci.c18
-rw-r--r--drivers/tty/tty_io.c16
-rw-r--r--drivers/tty/tty_ldisc.c16
-rw-r--r--drivers/tty/vt/vt.c14
-rw-r--r--drivers/uio/uio.c8
-rw-r--r--drivers/usb/chipidea/core.c29
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/generic.c9
-rw-r--r--drivers/usb/core/hcd.c1
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/core/quirks.c9
-rw-r--r--drivers/usb/dwc2/hcd.c6
-rw-r--r--drivers/usb/dwc3/core.c6
-rw-r--r--drivers/usb/dwc3/core.h17
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c4
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c7
-rw-r--r--drivers/usb/gadget/function/f_hid.c89
-rw-r--r--drivers/usb/gadget/function/f_midi.c3
-rw-r--r--drivers/usb/gadget/u_f.h2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_pci.c1
-rw-r--r--drivers/usb/gadget/udc/core.c7
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c20
-rw-r--r--drivers/usb/host/ohci-hcd.c10
-rw-r--r--drivers/usb/host/ohci-hub.c4
-rw-r--r--drivers/usb/host/ohci-q.c17
-rw-r--r--drivers/usb/host/xhci-plat.c1
-rw-r--r--drivers/usb/misc/ldusb.c6
-rw-r--r--drivers/usb/misc/lvstest.c1
-rw-r--r--drivers/usb/mon/mon_text.c126
-rw-r--r--drivers/usb/musb/musb_core.c8
-rw-r--r--drivers/usb/musb/musb_gadget.c3
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c14
-rw-r--r--drivers/usb/musb/musb_host.c12
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c5
-rw-r--r--drivers/usb/serial/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h9
-rw-r--r--drivers/usb/serial/option.c448
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/serial/visor.c69
-rw-r--r--drivers/usb/storage/ene_ub6250.c11
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/usbip/stub_main.c5
-rw-r--r--drivers/usb/usbip/usbip_common.h2
-rw-r--r--drivers/usb/usbip/usbip_event.c4
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c8
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c29
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c13
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/vhost/vhost.c17
-rw-r--r--drivers/video/backlight/backlight.c15
-rw-r--r--drivers/video/backlight/corgi_lcd.c2
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c2
-rw-r--r--drivers/video/console/vgacon.c34
-rw-r--r--drivers/video/fbdev/amba-clcd.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c16
-rw-r--r--drivers/video/fbdev/sm501fb.c1
-rw-r--r--drivers/video/fbdev/udlfb.c14
-rw-r--r--drivers/video/fbdev/vfb.c17
-rw-r--r--drivers/video/hdmi.c51
-rw-r--r--drivers/virtio/virtio_ring.c2
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/f71808e_wdt.c29
-rw-r--r--drivers/watchdog/hpwdt.c507
-rw-r--r--drivers/watchdog/watchdog_dev.c6
-rw-r--r--drivers/watchdog/wdat_wdt.c2
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--fs/aio.c55
-rw-r--r--fs/autofs4/root.c2
-rw-r--r--fs/btrfs/acl.c6
-rw-r--r--fs/btrfs/extent_io.c2
-rw-r--r--fs/btrfs/file.c4
-rw-r--r--fs/btrfs/inode.c6
-rw-r--r--fs/btrfs/send.c23
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/volumes.c24
-rw-r--r--fs/ceph/file.c9
-rw-r--r--fs/cifs/cifsglob.h1
-rw-r--r--fs/cifs/dir.c9
-rw-r--r--fs/cifs/file.c2
-rw-r--r--fs/cifs/inode.c33
-rw-r--r--fs/cifs/netmisc.c6
-rw-r--r--fs/cifs/sess.c22
-rw-r--r--fs/cifs/smb2pdu.c29
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/dax.c1
-rw-r--r--fs/dcache.c34
-rw-r--r--fs/ext4/balloc.c20
-rw-r--r--fs/ext4/extents.c16
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/ialloc.c54
-rw-r--r--fs/ext4/inode.c11
-rw-r--r--fs/ext4/mballoc.c23
-rw-r--r--fs/ext4/super.c6
-rw-r--r--fs/ext4/xattr.c3
-rw-r--r--fs/f2fs/data.c2
-rw-r--r--fs/f2fs/extent_cache.c12
-rw-r--r--fs/f2fs/gc.c6
-rw-r--r--fs/fs-writeback.c9
-rw-r--r--fs/jbd2/journal.c22
-rw-r--r--fs/jbd2/transaction.c1
-rw-r--r--fs/jffs2/super.c2
-rw-r--r--fs/lockd/svc.c4
-rw-r--r--fs/namei.c11
-rw-r--r--fs/namespace.c3
-rw-r--r--fs/ncpfs/ncplib_kernel.c4
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c1
-rw-r--r--fs/nfs/nfs4proc.c13
-rw-r--r--fs/nfs/nfs4state.c10
-rw-r--r--fs/nfs/pagelist.c6
-rw-r--r--fs/nfs/pnfs.c18
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/nfs/write.c83
-rw-r--r--fs/nfsd/nfs4proc.c6
-rw-r--r--fs/nfsd/nfs4state.c62
-rw-r--r--fs/nfsd/vfs.c24
-rw-r--r--fs/notify/fanotify/fanotify.c34
-rw-r--r--fs/orangefs/super.c5
-rw-r--r--fs/orangefs/waitqueue.c9
-rw-r--r--fs/overlayfs/dir.c3
-rw-r--r--fs/overlayfs/inode.c12
-rw-r--r--fs/proc/base.c10
-rw-r--r--fs/reiserfs/journal.c4
-rw-r--r--fs/reiserfs/reiserfs.h1
-rw-r--r--fs/reiserfs/super.c21
-rw-r--r--fs/super.c6
-rw-r--r--fs/ubifs/super.c14
-rw-r--r--fs/udf/unicode.c6
-rw-r--r--fs/xfs/xfs_file.c14
-rw-r--r--fs/xfs/xfs_qm.c46
-rw-r--r--include/acpi/actbl2.h9
-rw-r--r--include/acpi/platform/acgcc.h10
-rw-r--r--include/acpi/platform/acintel.h2
-rw-r--r--include/asm-generic/futex.h50
-rw-r--r--include/asm-generic/pgtable.h10
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/drm/drm_crtc_helper.h1
-rw-r--r--include/dt-bindings/clock/mt2701-clk.h3
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h1
-rw-r--r--include/kvm/arm_psci.h63
-rw-r--r--include/linux/arm-smccc.h165
-rw-r--r--include/linux/backing-dev-defs.h5
-rw-r--r--include/linux/backing-dev.h30
-rw-r--r--include/linux/cgroup-defs.h4
-rw-r--r--include/linux/compiler-clang.h5
-rw-r--r--include/linux/compiler-gcc.h4
-rw-r--r--include/linux/cpumask.h10
-rw-r--r--include/linux/dax.h5
-rw-r--r--include/linux/fs.h21
-rw-r--r--include/linux/fsl_ifc.h6
-rw-r--r--include/linux/hid.h6
-rw-r--r--include/linux/if_vlan.h7
-rw-r--r--include/linux/init.h11
-rw-r--r--include/linux/jiffies.h13
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/llist.h21
-rw-r--r--include/linux/mlx4/qp.h1
-rw-r--r--include/linux/mlx5/device.h10
-rw-r--r--include/linux/mlx5/driver.h4
-rw-r--r--include/linux/mm.h18
-rw-r--r--include/linux/mtd/flashchip.h1
-rw-r--r--include/linux/netfilter/x_tables.h40
-rw-r--r--include/linux/nospec.h26
-rw-r--r--include/linux/pagemap.h4
-rw-r--r--include/linux/pci.h6
-rw-r--r--include/linux/platform_data/isl9305.h2
-rw-r--r--include/linux/posix-clock.h10
-rw-r--r--include/linux/psci.h14
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/rhashtable.h4
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/serial_core.h21
-rw-r--r--include/linux/skbuff.h8
-rw-r--r--include/linux/tty.h3
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/virtio.h3
-rw-r--r--include/linux/workqueue.h1
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/bonding.h1
-rw-r--r--include/net/cfg80211.h2
-rw-r--r--include/net/inet_timewait_sock.h1
-rw-r--r--include/net/llc_conn.h1
-rw-r--r--include/net/nexthop.h2
-rw-r--r--include/net/sch_generic.h19
-rw-r--r--include/net/slhc_vj.h1
-rw-r--r--include/net/tcp.h8
-rw-r--r--include/net/udplite.h1
-rw-r--r--include/net/x25.h4
-rw-r--r--include/rdma/ib_addr.h2
-rw-r--r--include/soc/fsl/qe/qe.h5
-rw-r--r--include/sound/control.h7
-rw-r--r--include/sound/pcm_oss.h1
-rw-r--r--include/uapi/linux/eventpoll.h8
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/libc-compat.h55
-rw-r--r--include/uapi/linux/pci_regs.h2
-rw-r--r--include/uapi/linux/psci.h3
-rw-r--r--include/uapi/linux/random.h3
-rw-r--r--include/uapi/linux/usb/audio.h4
-rw-r--r--init/main.c7
-rw-r--r--ipc/shm.c35
-rw-r--r--kernel/bpf/arraymap.c37
-rw-r--r--kernel/bpf/hashtab.c9
-rw-r--r--kernel/bpf/stackmap.c1
-rw-r--r--kernel/bpf/syscall.c22
-rw-r--r--kernel/cpu.c13
-rw-r--r--kernel/events/callchain.c31
-rw-r--r--kernel/events/core.c31
-rw-r--r--kernel/events/hw_breakpoint.c30
-rw-r--r--kernel/events/ring_buffer.c7
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/futex.c39
-rw-r--r--kernel/irq/debug.h5
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/kprobes.c2
-rw-r--r--kernel/locking/locktorture.c76
-rw-r--r--kernel/memremap.c13
-rw-r--r--kernel/module.c20
-rw-r--r--kernel/pid.c4
-rw-r--r--kernel/printk/braille.c15
-rw-r--r--kernel/printk/braille.h13
-rw-r--r--kernel/printk/printk.c8
-rw-r--r--kernel/resource.c3
-rw-r--r--kernel/sched/core.c5
-rw-r--r--kernel/sched/deadline.c98
-rw-r--r--kernel/sched/fair.c3
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/time/hrtimer.c7
-rw-r--r--kernel/time/posix-clock.c34
-rw-r--r--kernel/time/sched_clock.c5
-rw-r--r--kernel/time/timer.c6
-rw-r--r--kernel/time/timer_list.c6
-rw-r--r--kernel/trace/trace_events_filter.c3
-rw-r--r--kernel/trace/trace_kprobe.c4
-rw-r--r--kernel/trace/trace_probe.c8
-rw-r--r--kernel/trace/trace_probe.h2
-rw-r--r--kernel/trace/trace_uprobe.c2
-rw-r--r--kernel/tracepoint.c4
-rw-r--r--kernel/workqueue.c16
-rw-r--r--lib/ioremap.c6
-rw-r--r--lib/kobject.c12
-rw-r--r--lib/mpi/longlong.h18
-rw-r--r--lib/rhashtable.c4
-rw-r--r--mm/filemap.c9
-rw-r--r--mm/frame_vector.c12
-rw-r--r--mm/gup.c67
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/khugepaged.c7
-rw-r--r--mm/memory-failure.c8
-rw-r--r--mm/memory.c40
-rw-r--r--mm/page-writeback.c18
-rw-r--r--mm/percpu.c1
-rw-r--r--mm/shmem.c31
-rw-r--r--mm/slab.c3
-rw-r--r--mm/vmscan.c19
-rw-r--r--mm/vmstat.c22
-rw-r--r--net/8021q/vlan_dev.c9
-rw-r--r--net/atm/lec.c9
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c20
-rw-r--r--net/bluetooth/6lowpan.c10
-rw-r--r--net/bluetooth/af_bluetooth.c24
-rw-r--r--net/bluetooth/hci_conn.c29
-rw-r--r--net/bluetooth/hci_core.c17
-rw-r--r--net/bluetooth/hci_event.c15
-rw-r--r--net/bluetooth/l2cap_core.c2
-rw-r--r--net/bluetooth/smp.c8
-rw-r--r--net/bridge/br_if.c4
-rw-r--r--net/bridge/br_sysfs_if.c3
-rw-r--r--net/bridge/netfilter/ebt_among.c55
-rw-r--r--net/bridge/netfilter/ebtables.c13
-rw-r--r--net/ceph/messenger.c7
-rw-r--r--net/ceph/mon_client.c14
-rw-r--r--net/ceph/osdmap.c1
-rw-r--r--net/compat.c6
-rw-r--r--net/core/dev.c39
-rw-r--r--net/core/dev_addr_lists.c4
-rw-r--r--net/core/neighbour.c54
-rw-r--r--net/core/net_namespace.c19
-rw-r--r--net/core/skbuff.c78
-rw-r--r--net/core/sysctl_net_core.c2
-rw-r--r--net/dccp/ccids/ccid2.c14
-rw-r--r--net/dccp/ipv4.c1
-rw-r--r--net/dccp/ipv6.c1
-rw-r--r--net/dccp/proto.c5
-rw-r--r--net/dccp/timer.c2
-rw-r--r--net/dns_resolver/dns_key.c13
-rw-r--r--net/hsr/hsr_forward.c3
-rw-r--r--net/hsr/hsr_framereg.c9
-rw-r--r--net/hsr/hsr_framereg.h2
-rw-r--r--net/ieee802154/6lowpan/core.c12
-rw-r--r--net/ieee802154/socket.c8
-rw-r--r--net/ipv4/ah4.c8
-rw-r--r--net/ipv4/arp.c18
-rw-r--r--net/ipv4/esp4.c13
-rw-r--r--net/ipv4/fib_semantics.c25
-rw-r--r--net/ipv4/inet_fragment.c3
-rw-r--r--net/ipv4/inet_timewait_sock.c1
-rw-r--r--net/ipv4/ip_sockglue.c13
-rw-r--r--net/ipv4/ip_tunnel.c11
-rw-r--r--net/ipv4/ipmr.c18
-rw-r--r--net/ipv4/netfilter/arp_tables.c20
-rw-r--r--net/ipv4/netfilter/ip_tables.c23
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c57
-rw-r--r--net/ipv4/ping.c7
-rw-r--r--net/ipv4/route.c8
-rw-r--r--net/ipv4/tcp.c10
-rw-r--r--net/ipv4/tcp_bbr.c4
-rw-r--r--net/ipv4/tcp_input.c47
-rw-r--r--net/ipv4/tcp_output.c33
-rw-r--r--net/ipv4/udp.c12
-rw-r--r--net/ipv6/addrconf.c5
-rw-r--r--net/ipv6/ah6.c8
-rw-r--r--net/ipv6/esp6.c12
-rw-r--r--net/ipv6/ip6_checksum.c5
-rw-r--r--net/ipv6/ip6_gre.c8
-rw-r--r--net/ipv6/ip6_output.c20
-rw-r--r--net/ipv6/ip6_tunnel.c23
-rw-r--r--net/ipv6/ip6_vti.c7
-rw-r--r--net/ipv6/ipv6_sockglue.c10
-rw-r--r--net/ipv6/ndisc.c5
-rw-r--r--net/ipv6/netfilter/ip6_tables.c21
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c4
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/ipv6/sit.c11
-rw-r--r--net/iucv/af_iucv.c4
-rw-r--r--net/kcm/kcmsock.c34
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_core.c8
-rw-r--r--net/l2tp/l2tp_ppp.c7
-rw-r--r--net/llc/af_llc.c20
-rw-r--r--net/llc/llc_c_ac.c9
-rw-r--r--net/llc/llc_conn.c22
-rw-r--r--net/mac80211/cfg.c60
-rw-r--r--net/mac80211/driver-ops.h3
-rw-r--r--net/mac80211/ibss.c10
-rw-r--r--net/mac80211/ieee80211_i.h36
-rw-r--r--net/mac80211/iface.c2
-rw-r--r--net/mac80211/mesh.c29
-rw-r--r--net/mac80211/mesh_plink.c37
-rw-r--r--net/mac80211/mlme.c18
-rw-r--r--net/mac80211/rate.c10
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/mac80211/sta_info.c13
-rw-r--r--net/mac80211/status.c1
-rw-r--r--net/mac80211/tdls.c29
-rw-r--r--net/mac80211/tx.c5
-rw-r--r--net/mac80211/util.c6
-rw-r--r--net/mpls/af_mpls.c36
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c30
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c155
-rw-r--r--net/netfilter/nf_conntrack_core.c39
-rw-r--r--net/netfilter/nf_conntrack_helper.c26
-rw-r--r--net/netfilter/nf_conntrack_netlink.c14
-rw-r--r--net/netfilter/nf_nat_proto_common.c7
-rw-r--r--net/netfilter/nft_dynset.c5
-rw-r--r--net/netfilter/x_tables.c89
-rw-r--r--net/netfilter/xt_CT.c11
-rw-r--r--net/netfilter/xt_IDLETIMER.c9
-rw-r--r--net/netfilter/xt_LED.c12
-rw-r--r--net/netfilter/xt_hashlimit.c11
-rw-r--r--net/netfilter/xt_recent.c6
-rw-r--r--net/netlink/af_netlink.c9
-rw-r--r--net/netlink/genetlink.c12
-rw-r--r--net/openvswitch/conntrack.c30
-rw-r--r--net/openvswitch/flow_netlink.c9
-rw-r--r--net/packet/af_packet.c82
-rw-r--r--net/packet/internal.h10
-rw-r--r--net/rds/bind.c1
-rw-r--r--net/rds/send.c15
-rw-r--r--net/rfkill/rfkill-gpio.c7
-rw-r--r--net/rxrpc/output.c2
-rw-r--r--net/rxrpc/rxkad.c19
-rw-r--r--net/sched/act_api.c4
-rw-r--r--net/sched/act_bpf.c12
-rw-r--r--net/sched/act_csum.c12
-rw-r--r--net/sched/act_ife.c2
-rw-r--r--net/sched/act_skbmod.c3
-rw-r--r--net/sched/act_tunnel_key.c10
-rw-r--r--net/sched/sch_fq.c37
-rw-r--r--net/sched/sch_netem.c28
-rw-r--r--net/sctp/associola.c30
-rw-r--r--net/sctp/inqueue.c2
-rw-r--r--net/sctp/ipv6.c75
-rw-r--r--net/sctp/protocol.c10
-rw-r--r--net/sctp/sm_make_chunk.c7
-rw-r--r--net/sctp/sm_statefuns.c89
-rw-r--r--net/sctp/socket.c33
-rw-r--r--net/sctp/ulpevent.c1
-rw-r--r--net/strparser/strparser.c11
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c3
-rw-r--r--net/sunrpc/rpc_pipe.c1
-rw-r--r--net/sunrpc/xprtrdma/verbs.c1
-rw-r--r--net/sunrpc/xprtsock.c7
-rw-r--r--net/tipc/bearer.c5
-rw-r--r--net/tipc/monitor.c6
-rw-r--r--net/tipc/netlink.c3
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/wireless/nl80211.c5
-rw-r--r--net/wireless/util.c6
-rw-r--r--net/x25/af_x25.c24
-rw-r--r--net/x25/sysctl_net_x25.c5
-rw-r--r--net/xfrm/xfrm_ipcomp.c2
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_state.c15
-rw-r--r--net/xfrm/xfrm_user.c21
-rw-r--r--scripts/Makefile.lib8
-rwxr-xr-xscripts/tags.sh1
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/integrity/ima/ima_appraise.c3
-rw-r--r--security/selinux/hooks.c34
-rw-r--r--security/selinux/ss/services.c2
-rw-r--r--sound/core/oss/pcm_oss.c200
-rw-r--r--sound/core/pcm.c8
-rw-r--r--sound/core/pcm_compat.c2
-rw-r--r--sound/core/pcm_native.c3
-rw-r--r--sound/core/rawmidi_compat.c18
-rw-r--r--sound/core/seq/oss/seq_oss_event.c15
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c85
-rw-r--r--sound/core/seq/oss/seq_oss_synth.h3
-rw-r--r--sound/core/seq/seq_clientmgr.c25
-rw-r--r--sound/core/seq/seq_fifo.c2
-rw-r--r--sound/core/seq/seq_memory.c14
-rw-r--r--sound/core/seq/seq_memory.h3
-rw-r--r--sound/core/seq/seq_prioq.c28
-rw-r--r--sound/core/seq/seq_prioq.h6
-rw-r--r--sound/core/seq/seq_queue.c28
-rw-r--r--sound/core/seq/seq_virmidi.c4
-rw-r--r--sound/drivers/aloop.c46
-rw-r--r--sound/drivers/opl3/opl3_synth.c7
-rw-r--r--sound/firewire/amdtp-stream.c5
-rw-r--r--sound/firewire/amdtp-stream.h3
-rw-r--r--sound/firewire/dice/dice-stream.c2
-rw-r--r--sound/firewire/dice/dice.c2
-rw-r--r--sound/firewire/digi00x/amdtp-dot.c55
-rw-r--r--sound/firewire/digi00x/digi00x.c13
-rw-r--r--sound/firewire/digi00x/digi00x.h1
-rw-r--r--sound/pci/asihpi/hpimsginit.c13
-rw-r--r--sound/pci/asihpi/hpioctl.c4
-rw-r--r--sound/pci/hda/hda_hwdep.c12
-rw-r--r--sound/pci/hda/hda_intel.c46
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_realtek.c41
-rw-r--r--sound/pci/rme9652/hdspm.c24
-rw-r--r--sound/pci/rme9652/rme9652.c6
-rw-r--r--sound/soc/codecs/nau8825.c1
-rw-r--r--sound/soc/codecs/rt5651.c1
-rw-r--r--sound/soc/codecs/rt5677.c7
-rw-r--r--sound/soc/codecs/sgtl5000.c11
-rw-r--r--sound/soc/codecs/ssm2602.c19
-rw-r--r--sound/soc/fsl/fsl_esai.c7
-rw-r--r--sound/soc/generic/simple-card.c2
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c16
-rw-r--r--sound/soc/intel/atom/sst/sst_stream.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c7
-rw-r--r--sound/soc/intel/skylake/skl-messages.c4
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c4
-rw-r--r--sound/soc/intel/skylake/skl.c2
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c4
-rw-r--r--sound/soc/sh/rcar/cmd.c3
-rw-r--r--sound/soc/sh/rcar/ssi.c20
-rw-r--r--sound/usb/line6/midi.c2
-rw-r--r--sound/usb/mixer_maps.c3
-rw-r--r--sound/usb/quirks-table.h47
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--tools/lib/str_error_r.c2
-rw-r--r--tools/lib/subcmd/pager.c5
-rw-r--r--tools/perf/builtin-probe.c6
-rw-r--r--tools/perf/builtin-stat.c25
-rw-r--r--tools/perf/builtin-trace.c47
-rw-r--r--tools/perf/tests/kmod-path.c2
-rw-r--r--tools/perf/util/annotate.c10
-rw-r--r--tools/perf/util/build-id.c6
-rw-r--r--tools/perf/util/event.c4
-rw-r--r--tools/perf/util/evsel.c12
-rw-r--r--tools/perf/util/header.c12
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c64
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h2
-rw-r--r--tools/perf/util/intel-pt.c37
-rw-r--r--tools/perf/util/ordered-events.c3
-rw-r--r--tools/perf/util/probe-event.c10
-rw-r--r--tools/perf/util/session.c17
-rw-r--r--tools/perf/util/sort.c5
-rw-r--r--tools/perf/util/trigger.h9
-rw-r--r--tools/perf/util/unwind-libdw.c14
-rw-r--r--tools/perf/util/unwind-libunwind-local.c11
-rw-r--r--tools/perf/util/util.c2
-rw-r--r--tools/testing/nvdimm/test/nfit.c10
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh7
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-resched-dscr.c2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c2
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c117
-rw-r--r--tools/testing/selftests/x86/mpx-mini-test.c3
-rw-r--r--tools/testing/selftests/x86/protection_keys.c49
-rw-r--r--tools/testing/selftests/x86/ptrace_syscall.c8
-rw-r--r--tools/usb/usbip/src/usbipd.c2
-rw-r--r--virt/kvm/kvm_main.c3
1383 files changed, 19173 insertions, 6545 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index fee35c00cc4e..0406076e4405 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -32,7 +32,7 @@ Description:
Description of the physical chip / device for device X.
Typically a part number.
-What: /sys/bus/iio/devices/iio:deviceX/timestamp_clock
+What: /sys/bus/iio/devices/iio:deviceX/current_timestamp_clock
KernelVersion: 4.5
Contact: linux-iio@vger.kernel.org
Description:
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index d11af52427b4..ac9489fad31b 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -54,6 +54,7 @@ stable kernels.
| ARM | Cortex-A57 | #852523 | N/A |
| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
| ARM | Cortex-A72 | #853709 | N/A |
+| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
index 2b7b3fa588d7..606da38c0959 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
@@ -1,11 +1,14 @@
-* Amlogic Meson8b Clock and Reset Unit
+* Amlogic Meson8, Meson8b and Meson8m2 Clock and Reset Unit
-The Amlogic Meson8b clock controller generates and supplies clock to various
-controllers within the SoC.
+The Amlogic Meson8 / Meson8b / Meson8m2 clock controller generates and
+supplies clock to various controllers within the SoC.
Required Properties:
-- compatible: should be "amlogic,meson8b-clkc"
+- compatible: must be one of:
+ - "amlogic,meson8-clkc" for Meson8 (S802) SoCs
+ - "amlogic,meson8b-clkc" for Meson8 (S805) SoCs
+ - "amlogic,meson8m2-clkc" for Meson8m2 (S812) SoCs
- reg: it must be composed by two tuples:
0) physical base address of the xtal register and length of memory
mapped region.
diff --git a/Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt
index 7175dc3740ac..ed34253d9fb1 100644
--- a/Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt
+++ b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt
@@ -2,7 +2,7 @@ Toppoly TD028TTEC1 Panel
========================
Required properties:
-- compatible: "toppoly,td028ttec1"
+- compatible: "tpo,td028ttec1"
Optional properties:
- label: a symbolic name for the panel
@@ -14,7 +14,7 @@ Example
-------
lcd-panel: td028ttec1@0 {
- compatible = "toppoly,td028ttec1";
+ compatible = "tpo,td028ttec1";
reg = <0>;
spi-max-frequency = <100000>;
spi-cpol;
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 4f7ae7555758..bda9d6fab6b4 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -47,10 +47,13 @@ Required properties:
Documentation/devicetree/bindings/media/video-interfaces.txt. The
first port should be the input endpoint, the second one the output
- The output should have two endpoints. The first is the block
- connected to the TCON channel 0 (usually a panel or a bridge), the
- second the block connected to the TCON channel 1 (usually the TV
- encoder)
+ The output may have multiple endpoints. The TCON has two channels,
+ usually with the first channel being used for the panel interfaces
+ (RGB, LVDS, etc.), and the second being used for the outputs that
+ require another controller (TV Encoder, HDMI, etc.). The endpoints
+ will take an extra property, allwinner,tcon-channel, to specify the
+ channel the endpoint is associated with. If that property is not
+ present, the endpoint number will be used as the channel number.
On SoCs other than the A33, there is one more clock required:
- 'tcon-ch1': The clock driving the TCON channel 1
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index 8f3ad9ab4637..b41d2601c6ba 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -28,6 +28,9 @@ Optional properties:
regulator to drive the OTG VBus, rather than as an input pin
which signals whether the board is driving OTG VBus or not.
+- x-powers,master-mode: Boolean (axp806 only). Set this when the PMIC is
+ wired for master mode. The default is slave mode.
+
- <input>-supply: a phandle to the regulator supply node. May be omitted if
inputs are unregulated, such as using the IPSOUT output
from the PMIC.
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
index caf297bee1fb..c28d4eb83b76 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
@@ -35,6 +35,15 @@ Optional properties:
- ti,palmas-enable-dvfs2: Enable DVFS2. Configure pins for DVFS2 mode.
Selects the primary or secondary function associated with the GPADC_START
and SYSEN2 pin/pad for the DVFS2 interface
+- ti,palmas-override-powerhold: This is applicable to PMICs on which
+  GPIO7 is configured in POWERHOLD mode, which takes priority over the
+  DEV_ON bit and keeps the PMIC supplies on even after DEV_ON is
+  cleared. This property lets the driver override the POWERHOLD value
+  on GPIO7 so that the PMIC can actually be turned off in power-off
+  scenarios. When ti,palmas-override-powerhold is set, the GPIO_7
+  field should never be muxed to anything else; it stays set to
+  POWERHOLD by default, and the driver overrides the mux value only
+  in power-off scenarios.
This binding uses the following generic properties as defined in
pinctrl-bindings.txt:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 466c039c622b..5f9e51436a99 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2640,6 +2640,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noalign [KNL,ARM]
+ noaltinstr [S390] Disables alternative instructions patching
+ (CPU alternatives feature).
+
noapic [SMP,APIC] Tells the kernel to not make use of any
IOAPICs that may be present in the system.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 1f5eab4ef88f..e46c14fac9da 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2118,6 +2118,9 @@ ARM 32-bit VFP control registers have the following id bit patterns:
ARM 64-bit FP registers have the following id bit patterns:
0x4030 0000 0012 0 <regno:12>
+ARM firmware pseudo-registers have the following bit pattern:
+ 0x4030 0000 0014 <regno:16>
+
arm64 registers are mapped using the lower 32 bits. The upper 16 of
that is the register group type, or coprocessor number:
@@ -2134,6 +2137,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
arm64 system registers have the following id bit patterns:
0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
+arm64 firmware pseudo-registers have the following bit pattern:
+ 0x6030 0000 0014 <regno:16>
+
MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
the register group type:
@@ -2656,7 +2662,8 @@ Possible features:
and execute guest code when KVM_RUN is called.
- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
- - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
+ - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
+ backward compatible with v0.2) for the CPU.
Depends on KVM_CAP_ARM_PSCI_0_2.
- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
Depends on KVM_CAP_ARM_PMU_V3.
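For illustration, the firmware pseudo-register IDs quoted above can be
reproduced with a few shifts and masks. A minimal user-space sketch, with the
constants mirrored from the uapi headers touched by this series (regno 0 is
the PSCI version register):

#include <stdint.h>
#include <stdio.h>

/* Values mirrored from the uapi headers; assumptions, not new definitions. */
#define KVM_REG_ARM64             0x6000000000000000ULL /* register group  */
#define KVM_REG_SIZE_U64          0x0030000000000000ULL /* 64-bit register */
#define KVM_REG_ARM_COPROC_SHIFT  16
#define KVM_REG_ARM_FW            (0x0014ULL << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_FW_REG(r)     (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                                   KVM_REG_ARM_FW | ((r) & 0xffff))

int main(void)
{
	/* Prints 0x6030000000140000, i.e. "0x6030 0000 0014 0000". */
	printf("%#llx\n", (unsigned long long)KVM_REG_ARM_FW_REG(0));
	return 0;
}

The 32-bit arm variant only changes the register group bits, giving the
documented 0x4030 0000 0014 <regno:16> pattern.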
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
new file mode 100644
index 000000000000..aafdab887b04
--- /dev/null
+++ b/Documentation/virtual/kvm/arm/psci.txt
@@ -0,0 +1,30 @@
+KVM implements the PSCI (Power State Coordination Interface)
+specification in order to provide services such as CPU on/off, reset
+and power-off to the guest.
+
+The PSCI specification is regularly updated to provide new features,
+and KVM implements these updates if they make sense from a virtualization
+point of view.
+
+This means that a guest booted on two different versions of KVM can
+observe two different "firmware" revisions. This could cause issues if
+a given guest is tied to a particular PSCI revision (unlikely), or if
+a migration causes a different PSCI version to be exposed out of the
+blue to an unsuspecting guest.
+
+In order to remedy this situation, KVM exposes a set of "firmware
+pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
+interface. These registers can be saved/restored by userspace, and set
+to a convenient value if required.
+
+The following register is defined:
+
+* KVM_REG_ARM_PSCI_VERSION:
+
+ - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
+ (and thus has already been initialized)
+ - Returns the current PSCI version on GET_ONE_REG (defaulting to the
+ highest PSCI version implemented by KVM and compatible with v0.2)
+ - Allows any PSCI version implemented by KVM and compatible with
+ v0.2 to be set with SET_ONE_REG
+ - Affects the whole VM (even if the register view is per-vcpu)
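A minimal sketch of how a VMM could save and restore this register across a
migration, assuming vcpu_fd refers to a vcpu initialised with the
KVM_ARM_VCPU_PSCI_0_2 feature and that the installed kernel headers carry the
definitions added by this series:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* struct kvm_one_reg, KVM_REG_ARM_PSCI_VERSION */

/* Read the PSCI version exposed to the guest (source side of a migration). */
static int psci_version_get(int vcpu_fd, uint64_t *version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (uintptr_t)version,
	};
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

/* Restore it on the destination; KVM rejects versions it cannot honour. */
static int psci_version_set(int vcpu_fd, uint64_t version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (uintptr_t)&version,
	};
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}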
diff --git a/Makefile b/Makefile
index db13b13cdcc2..7d7bda23db8f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 84
+SUBLEVEL = 101
EXTRAVERSION =
NAME = Roaring Lionus
@@ -790,6 +790,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
+# clang sets -fmerge-all-constants by default as an optimization, but this
+# is non-conforming behavior for C and in fact breaks the kernel, so we
+# need to disable it here generally.
+KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants)
+
+# for gcc, -fno-merge-all-constants disables all constant merging, but the
+# standard-conforming merging of identical constants is fine to keep enabled.
+KBUILD_CFLAGS += $(call cc-option,-fmerge-constants)
+
# Make sure -fstack-check isn't enabled (like gentoo apparently did)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
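The kernel relies on distinct const objects having distinct addresses, which
is exactly what -fmerge-all-constants (a clang default at the time) takes
away. A minimal user-space illustration of the difference, not taken from the
kernel itself:

#include <stdio.h>

static const int sentinel_a = 42;
static const int sentinel_b = 42;

int main(void)
{
	/*
	 * C requires two distinct objects to have distinct addresses, so
	 * this must print "distinct".  With -fmerge-all-constants the
	 * compiler may fold sentinel_a and sentinel_b into one object and
	 * print "merged", breaking code that compares addresses as
	 * identities.  Plain -fmerge-constants only merges identical string
	 * and floating-point constants, which is the conforming behaviour
	 * re-enabled above.
	 */
	printf("%s\n", &sentinel_a == &sentinel_b ? "merged" : "distinct");
	return 0;
}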
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index f939794363ac..56474690e685 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -29,18 +29,10 @@
: "r" (uaddr), "r"(oparg) \
: "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -66,17 +58,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
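The cmp/cmparg handling removed here (and from the other architectures below)
is not lost: the encoded_op decoding and the comparison now live once in
common code, and the per-arch helper only reports the old value through
*oval. Roughly, the common caller in kernel/futex.c introduced by this series
has the following shape (simplified sketch, not the verbatim kernel code):

static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op  = (encoded_op >> 28) & 7;
	unsigned int cmp = (encoded_op >> 24) & 15;
	int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	/* Arch code performs the atomic op and hands back the old value. */
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	/* The comparison is now done exactly once, here. */
	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval <  cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval >  cmparg;
	default:              return -ENOSYS;
	}
}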
diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c
index 6a61deed4a85..ab228ed45945 100644
--- a/arch/alpha/kernel/console.c
+++ b/arch/alpha/kernel/console.c
@@ -20,6 +20,7 @@
struct pci_controller *pci_vga_hose;
static struct resource alpha_vga = {
.name = "alpha-vga+",
+ .flags = IORESOURCE_IO,
.start = 0x3C0,
.end = 0x3DF
};
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 11e1b1f3acda..eb887dd13e74 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -73,20 +73,11 @@
#endif
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
#ifndef CONFIG_ARC_HAS_LLSC
preempt_disable(); /* to guarantee atomic r-m-w of futex op */
#endif
@@ -118,30 +109,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
preempt_enable();
#endif
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index 6df7829a2c15..78bee26361f1 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -204,6 +204,7 @@
interrupt-controller;
ti,system-power-controller;
+ ti,palmas-override-powerhold;
tps659038_pmic {
compatible = "ti,tps659038-pmic";
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index db858fff4e18..1cc62727e43a 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -57,6 +57,7 @@
#interrupt-cells = <2>;
interrupt-controller;
ti,system-power-controller;
+ ti,palmas-override-powerhold;
tps659038_pmic {
compatible = "ti,tps659038-pmic";
diff --git a/arch/arm/boot/dts/aspeed-ast2500-evb.dts b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
index 1b7a5ff0e533..d7774d35e466 100644
--- a/arch/arm/boot/dts/aspeed-ast2500-evb.dts
+++ b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
@@ -15,7 +15,7 @@
bootargs = "console=ttyS4,115200 earlyprintk";
};
- memory {
+ memory@80000000 {
reg = <0x80000000 0x20000000>;
};
};
diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
index a7da0dd0c98f..0898213f3bb2 100644
--- a/arch/arm/boot/dts/at91sam9g25.dtsi
+++ b/arch/arm/boot/dts/at91sam9g25.dtsi
@@ -21,7 +21,7 @@
atmel,mux-mask = <
/* A B C */
0xffffffff 0xffe0399f 0xc000001c /* pioA */
- 0x0007ffff 0x8000fe3f 0x00000000 /* pioB */
+ 0x0007ffff 0x00047e3f 0x00000000 /* pioB */
0x80000000 0x07c0ffff 0xb83fffff /* pioC */
0x003fffff 0x003f8000 0x00000000 /* pioD */
>;
diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
index 12c981e51134..9a0599f711ff 100644
--- a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
+++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
@@ -1,6 +1,6 @@
/ {
aliases {
- ethernet = &ethernet;
+ ethernet0 = &ethernet;
};
};
diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
index 3f0a56ebcf1f..dc7ae776db5f 100644
--- a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
+++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
@@ -1,6 +1,6 @@
/ {
aliases {
- ethernet = &ethernet;
+ ethernet0 = &ethernet;
};
};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 132f2be10889..56311fd34f81 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -398,6 +398,8 @@
tps659038: tps659038@58 {
compatible = "ti,tps659038";
reg = <0x58>;
+ ti,palmas-override-powerhold;
+ ti,system-power-controller;
tps659038_pmic {
compatible = "ti,tps659038-pmic";
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 41ecd6d465a7..75a60633efff 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -408,7 +408,7 @@
reg = <0>;
vdd3-supply = <&lcd_vdd3_reg>;
vci-supply = <&ldo25_reg>;
- reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpf2 1 GPIO_ACTIVE_HIGH>;
power-on-delay= <50>;
reset-delay = <100>;
init-delay = <100>;
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index f7357d99b47c..64de33d067c9 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -640,7 +640,7 @@
power-domains = <&pd_gsc>;
clocks = <&clock CLK_GSCL0>;
clock-names = "gscl";
- iommu = <&sysmmu_gsc0>;
+ iommus = <&sysmmu_gsc0>;
};
gsc_1: gsc@13e10000 {
@@ -650,7 +650,7 @@
power-domains = <&pd_gsc>;
clocks = <&clock CLK_GSCL1>;
clock-names = "gscl";
- iommu = <&sysmmu_gsc1>;
+ iommus = <&sysmmu_gsc1>;
};
gsc_2: gsc@13e20000 {
@@ -660,7 +660,7 @@
power-domains = <&pd_gsc>;
clocks = <&clock CLK_GSCL2>;
clock-names = "gscl";
- iommu = <&sysmmu_gsc2>;
+ iommus = <&sysmmu_gsc2>;
};
gsc_3: gsc@13e30000 {
@@ -670,7 +670,7 @@
power-domains = <&pd_gsc>;
clocks = <&clock CLK_GSCL3>;
clock-names = "gscl";
- iommu = <&sysmmu_gsc3>;
+ iommus = <&sysmmu_gsc3>;
};
hdmi: hdmi@14530000 {
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index 96d7eede412e..036c9bd9bf75 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -23,7 +23,7 @@
imx53-qsrb {
pinctrl_pmic: pmicgrp {
fsl,pins = <
- MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */
+ MX53_PAD_CSI0_DAT5__GPIO5_23 0x1c4 /* IRQ */
>;
};
};
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 4f2c5ec75714..e262fa9ef334 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -97,6 +97,8 @@
};
&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
clock-frequency = <2600000>;
twl: twl@48 {
@@ -215,7 +217,12 @@
>;
};
-
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
};
&omap3_pmx_wkup {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index efe53998c961..08f0a35dc0d1 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -100,6 +100,8 @@
};
&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
clock-frequency = <2600000>;
twl: twl@48 {
@@ -207,6 +209,12 @@
OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */
>;
};
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
};
&uart2 {
diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
index 940875316d0f..67b4de0e3439 100644
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -215,7 +215,7 @@
reg = <0x2a>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
- clocks = <&sys_mclk 1>;
+ clocks = <&sys_mclk>;
};
};
};
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index a8b148ad1dd2..44715c8ef756 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -187,7 +187,7 @@
reg = <0x0a>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
- clocks = <&sys_mclk 1>;
+ clocks = <&sys_mclk>;
};
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 368e21934285..825f6eae3d1c 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -146,7 +146,7 @@
};
esdhc: esdhc@1560000 {
- compatible = "fsl,esdhc";
+ compatible = "fsl,ls1021a-esdhc", "fsl,esdhc";
reg = <0x0 0x1560000 0x0 0x10000>;
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <0>;
diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts
index 10d088df0c35..4a962a26482d 100644
--- a/arch/arm/boot/dts/moxart-uc7112lx.dts
+++ b/arch/arm/boot/dts/moxart-uc7112lx.dts
@@ -6,7 +6,7 @@
*/
/dts-v1/;
-/include/ "moxart.dtsi"
+#include "moxart.dtsi"
/ {
model = "MOXA UC-7112-LX";
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index 1fd27ed65a01..64f2f44235d0 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -6,6 +6,7 @@
*/
/include/ "skeleton.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
/ {
compatible = "moxa,moxart";
@@ -36,8 +37,8 @@
ranges;
intc: interrupt-controller@98800000 {
- compatible = "moxa,moxart-ic";
- reg = <0x98800000 0x38>;
+ compatible = "moxa,moxart-ic", "faraday,ftintc010";
+ reg = <0x98800000 0x100>;
interrupt-controller;
#interrupt-cells = <2>;
interrupt-mask = <0x00080000>;
@@ -59,7 +60,7 @@
timer: timer@98400000 {
compatible = "moxa,moxart-timer";
reg = <0x98400000 0x42>;
- interrupts = <19 1>;
+ interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
clocks = <&clk_apb>;
};
@@ -80,7 +81,7 @@
dma: dma@90500000 {
compatible = "moxa,moxart-dma";
reg = <0x90500080 0x40>;
- interrupts = <24 0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
};
@@ -93,7 +94,7 @@
sdhci: sdhci@98e00000 {
compatible = "moxa,moxart-sdhci";
reg = <0x98e00000 0x5C>;
- interrupts = <5 0>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_apb>;
dmas = <&dma 5>,
<&dma 5>;
@@ -120,7 +121,7 @@
mac0: mac@90900000 {
compatible = "moxa,moxart-mac";
reg = <0x90900000 0x90>;
- interrupts = <25 0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
phy-handle = <&ethphy0>;
phy-mode = "mii";
status = "disabled";
@@ -129,7 +130,7 @@
mac1: mac@92000000 {
compatible = "moxa,moxart-mac";
reg = <0x92000000 0x90>;
- interrupts = <27 0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
phy-handle = <&ethphy1>;
phy-mode = "mii";
status = "disabled";
@@ -138,7 +139,7 @@
uart0: uart@98200000 {
compatible = "ns16550a";
reg = <0x98200000 0x20>;
- interrupts = <31 8>;
+ interrupts = <31 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
clock-frequency = <14745600>;
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index b7a24af8f47b..4b7d97275c62 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -154,10 +154,10 @@
i2c_0: i2c@78b7000 {
compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0x78b7000 0x6000>;
+ reg = <0x78b7000 0x600>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
- <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
+ <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>;
clock-names = "iface", "core";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi
index fb9ef9ca120e..959e3edf367b 100644
--- a/arch/arm/boot/dts/r7s72100.dtsi
+++ b/arch/arm/boot/dts/r7s72100.dtsi
@@ -112,7 +112,7 @@
#clock-cells = <1>;
compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0xfcfe0430 4>;
- clocks = <&p0_clk>;
+ clocks = <&b_clk>;
clock-indices = <R7S72100_CLK_ETHER>;
clock-output-names = "ether";
};
diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
index 7885075428bb..1788e186a512 100644
--- a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
+++ b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
@@ -266,7 +266,9 @@
lcd0_pins: lcd0 {
groups = "lcd0_data24_0", "lcd0_lclk_1", "lcd0_sync";
function = "lcd0";
+ };
+ lcd0_mux {
/* DBGMD/LCDC0/FSIA MUX */
gpio-hog;
gpios = <176 0>;
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index b6c6410ca384..262a51205aee 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1437,8 +1437,11 @@
compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
clocks = <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
<&p_clk>,
<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index f8a7d090fd01..12841e9bab98 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -279,7 +279,7 @@
x2_clk: x2-clock {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <148500000>;
+ clock-frequency = <74250000>;
};
x13_clk: x13-clock {
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 162b55c665a3..59405ebdce01 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -74,9 +74,8 @@
next-level-cache = <&L2_CA15>;
};
- L2_CA15: cache-controller@0 {
+ L2_CA15: cache-controller-0 {
compatible = "cache";
- reg = <0>;
power-domains = <&sysc R8A7791_PD_CA15_SCU>;
cache-unified;
cache-level = <2>;
@@ -1438,8 +1437,11 @@
compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
clocks = <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
<&p_clk>,
<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index 713141d38b3e..0b50c6766867 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -58,9 +58,8 @@
next-level-cache = <&L2_CA15>;
};
- L2_CA15: cache-controller@0 {
+ L2_CA15: cache-controller-0 {
compatible = "cache";
- reg = <0>;
cache-unified;
cache-level = <2>;
power-domains = <&sysc R8A7792_PD_CA15_SCU>;
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index 8d02aacf2892..e9625cb3bbaa 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -65,9 +65,8 @@
power-domains = <&sysc R8A7793_PD_CA15_CPU1>;
};
- L2_CA15: cache-controller@0 {
+ L2_CA15: cache-controller-0 {
compatible = "cache";
- reg = <0>;
power-domains = <&sysc R8A7793_PD_CA15_SCU>;
cache-unified;
cache-level = <2>;
@@ -1235,8 +1234,11 @@
compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
clocks = <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>,
<&p_clk>,
<&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
<&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
index cf880ac06f4b..8874451fb914 100644
--- a/arch/arm/boot/dts/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/r8a7794-silk.dts
@@ -425,7 +425,7 @@
status = "okay";
clocks = <&mstp7_clks R8A7794_CLK_DU0>,
- <&mstp7_clks R8A7794_CLK_DU0>,
+ <&mstp7_clks R8A7794_CLK_DU1>,
<&x2_clk>, <&x3_clk>;
clock-names = "du.0", "du.1", "dclkin.0", "dclkin.1";
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 7e860d3737ff..d8f4ca85ed3f 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -56,9 +56,8 @@
next-level-cache = <&L2_CA7>;
};
- L2_CA7: cache-controller@0 {
+ L2_CA7: cache-controller-0 {
compatible = "cache";
- reg = <0>;
power-domains = <&sysc R8A7794_PD_CA7_SCU>;
cache-unified;
cache-level = <2>;
@@ -917,7 +916,7 @@
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp7_clks R8A7794_CLK_DU0>,
- <&mstp7_clks R8A7794_CLK_DU0>;
+ <&mstp7_clks R8A7794_CLK_DU1>;
clock-names = "du.0", "du.1";
status = "disabled";
@@ -1262,19 +1261,21 @@
clocks = <&mp_clk>, <&hp_clk>,
<&zs_clk>, <&p_clk>, <&p_clk>, <&zs_clk>,
<&zs_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
- <&zx_clk>;
+ <&zx_clk>, <&zx_clk>;
#clock-cells = <1>;
clock-indices = <
R8A7794_CLK_EHCI R8A7794_CLK_HSUSB
R8A7794_CLK_HSCIF2 R8A7794_CLK_SCIF5
R8A7794_CLK_SCIF4 R8A7794_CLK_HSCIF1 R8A7794_CLK_HSCIF0
R8A7794_CLK_SCIF3 R8A7794_CLK_SCIF2 R8A7794_CLK_SCIF1
- R8A7794_CLK_SCIF0 R8A7794_CLK_DU0
+ R8A7794_CLK_SCIF0
+ R8A7794_CLK_DU1 R8A7794_CLK_DU0
>;
clock-output-names =
"ehci", "hsusb",
"hscif2", "scif5", "scif4", "hscif1", "hscif0",
- "scif3", "scif2", "scif1", "scif0", "du0";
+ "scif3", "scif2", "scif1", "scif0",
+ "du1", "du0";
};
mstp8_clks: mstp8_clks@e6150990 {
compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 9e6bf0e311bb..2679dc80f831 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -617,9 +617,9 @@
<0 12 RK_FUNC_1 &pcfg_pull_none>,
<0 13 RK_FUNC_1 &pcfg_pull_none>,
<0 14 RK_FUNC_1 &pcfg_pull_none>,
- <1 2 RK_FUNC_1 &pcfg_pull_none>,
- <1 4 RK_FUNC_1 &pcfg_pull_none>,
- <1 5 RK_FUNC_1 &pcfg_pull_none>;
+ <1 2 RK_FUNC_2 &pcfg_pull_none>,
+ <1 4 RK_FUNC_2 &pcfg_pull_none>,
+ <1 5 RK_FUNC_2 &pcfg_pull_none>;
};
};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 65e725fb5679..de0e189711f6 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1362,7 +1362,7 @@
pinctrl@fc06a000 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
ranges = <0xfc068000 0xfc068000 0x100
0xfc06a000 0xfc06a000 0x4000>;
/* WARNING: revisit as pin spec has changed */
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 79de828e49ad..e32b0550a338 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -1,6 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -32,6 +31,7 @@ CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_AEABI=y
CONFIG_KSM=y
CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
CONFIG_SECCOMP=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
@@ -52,6 +52,7 @@ CONFIG_MAC80211=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
+CONFIG_DMA_CMA=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
@@ -62,7 +63,6 @@ CONFIG_USB_NET_SMSC95XX=y
CONFIG_ZD1211RW=y
CONFIG_INPUT_EVDEV=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_TTY_PRINTK=y
diff --git a/arch/arm/configs/lsk_defconfig b/arch/arm/configs/lsk_defconfig
new file mode 100644
index 000000000000..5dce9050d229
--- /dev/null
+++ b/arch/arm/configs/lsk_defconfig
@@ -0,0 +1,932 @@
+CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMDLINE_PARTITION=y
+CONFIG_ARCH_MULTI_V7=y
+# CONFIG_ARCH_MULTI_V5 is not set
+# CONFIG_ARCH_MULTI_V4 is not set
+CONFIG_ARCH_VIRT=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_ARTPEC=y
+CONFIG_MACH_ARTPEC6=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_MACH_ARMADA_370=y
+CONFIG_MACH_ARMADA_375=y
+CONFIG_MACH_ARMADA_38X=y
+CONFIG_MACH_ARMADA_39X=y
+CONFIG_MACH_ARMADA_XP=y
+CONFIG_MACH_DOVE=y
+CONFIG_ARCH_AT91=y
+CONFIG_SOC_SAMA5D2=y
+CONFIG_SOC_SAMA5D3=y
+CONFIG_SOC_SAMA5D4=y
+CONFIG_ARCH_BCM=y
+CONFIG_ARCH_BCM_CYGNUS=y
+CONFIG_ARCH_BCM_NSP=y
+CONFIG_ARCH_BCM_21664=y
+CONFIG_ARCH_BCM_281XX=y
+CONFIG_ARCH_BCM_5301X=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_63XX=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_MACH_BERLIN_BG2=y
+CONFIG_MACH_BERLIN_BG2CD=y
+CONFIG_MACH_BERLIN_BG2Q=y
+CONFIG_ARCH_DIGICOLOR=y
+CONFIG_ARCH_HIGHBANK=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_HI3xxx=y
+CONFIG_ARCH_HIX5HD2=y
+CONFIG_ARCH_HIP01=y
+CONFIG_ARCH_HIP04=y
+CONFIG_ARCH_KEYSTONE=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MXC=y
+CONFIG_SOC_IMX50=y
+CONFIG_SOC_IMX51=y
+CONFIG_SOC_IMX53=y
+CONFIG_SOC_IMX6Q=y
+CONFIG_SOC_IMX6SL=y
+CONFIG_SOC_IMX6SX=y
+CONFIG_SOC_IMX6UL=y
+CONFIG_SOC_IMX7D=y
+CONFIG_SOC_VF610=y
+CONFIG_SOC_LS1021A=y
+CONFIG_ARCH_OMAP3=y
+CONFIG_ARCH_OMAP4=y
+CONFIG_SOC_OMAP5=y
+CONFIG_SOC_AM33XX=y
+CONFIG_SOC_AM43XX=y
+CONFIG_SOC_DRA7XX=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MSM8X60=y
+CONFIG_ARCH_MSM8960=y
+CONFIG_ARCH_MSM8974=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_SOCFPGA=y
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR13XX=y
+CONFIG_MACH_SPEAR1310=y
+CONFIG_MACH_SPEAR1340=y
+CONFIG_ARCH_STI=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_EXYNOS5420_MCPM=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_EMEV2=y
+CONFIG_ARCH_R7S72100=y
+CONFIG_ARCH_R8A73A4=y
+CONFIG_ARCH_R8A7740=y
+CONFIG_ARCH_R8A7778=y
+CONFIG_ARCH_R8A7779=y
+CONFIG_ARCH_R8A7790=y
+CONFIG_ARCH_R8A7791=y
+CONFIG_ARCH_R8A7792=y
+CONFIG_ARCH_R8A7793=y
+CONFIG_ARCH_R8A7794=y
+CONFIG_ARCH_SH73A0=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_SIRF=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_TEGRA_2x_SOC=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_ARCH_TEGRA_114_SOC=y
+CONFIG_ARCH_TEGRA_124_SOC=y
+CONFIG_TEGRA_EMC_SCALING_ENABLE=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_U8500=y
+CONFIG_MACH_HREFV60=y
+CONFIG_MACH_SNOWBALL=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_VEXPRESS_CA9X4=y
+CONFIG_ARCH_VEXPRESS_TC2_PM=y
+CONFIG_ARCH_WM8850=y
+CONFIG_ARCH_ZYNQ=y
+CONFIG_TRUSTED_FOUNDATIONS=y
+CONFIG_PCI=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_KEYSTONE=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MVEBU=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCI_RCAR_GEN2=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=16
+CONFIG_HIGHPTE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_KEXEC=y
+CONFIG_EFI=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_IMX6Q_CPUFREQ=y
+CONFIG_QORIQ_CPUFREQ=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+CONFIG_ARM_ZYNQ_CPUIDLE=y
+CONFIG_ARM_EXYNOS_CPUIDLE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NET_DSA=m
+CONFIG_NET_SWITCHDEV=y
+CONFIG_CAN=y
+CONFIG_CAN_RAW=y
+CONFIG_CAN_BCM=y
+CONFIG_CAN_DEV=y
+CONFIG_CAN_AT91=m
+CONFIG_CAN_RCAR=m
+CONFIG_CAN_XILINXCAN=y
+CONFIG_CAN_MCP251X=y
+CONFIG_NET_DSA_BCM_SF2=m
+CONFIG_CAN_SUN4I=y
+CONFIG_BT=m
+CONFIG_BT_MRVL=m
+CONFIG_BT_MRVL_SDIO=m
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_OMAP_OCP2SCP=y
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_SUNXI_RSB=m
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_BRCMNAND=y
+CONFIG_MTD_NAND_VF610_NFC=y
+CONFIG_MTD_NAND_DAVINCI=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_SPI_FSL_QUADSPI=m
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_VIRTIO_BLK=y
+CONFIG_AD525X_DPOT=y
+CONFIG_AD525X_DPOT_I2C=y
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ICS932S401=y
+CONFIG_ATMEL_SSC=m
+CONFIG_QCOM_COINCELL=m
+CONFIG_APDS9802ALS=y
+CONFIG_ISL29003=y
+CONFIG_EEPROM_AT24=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_BRCM=y
+CONFIG_AHCI_ST=y
+CONFIG_AHCI_IMX=y
+CONFIG_AHCI_SUNXI=y
+CONFIG_AHCI_TEGRA=y
+CONFIG_SATA_HIGHBANK=y
+CONFIG_SATA_MV=y
+CONFIG_SATA_RCAR=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_SUN4I_EMAC=y
+CONFIG_MACB=y
+CONFIG_BCMGENET=m
+CONFIG_SYSTEMPORT=m
+CONFIG_NET_CALXEDA_XGMAC=y
+CONFIG_GIANFAR=y
+CONFIG_IGB=y
+CONFIG_MV643XX_ETH=y
+CONFIG_MVNETA=y
+CONFIG_PXA168_ETH=m
+CONFIG_KS8851=y
+CONFIG_R8169=y
+CONFIG_SH_ETH=y
+CONFIG_SMSC911X=y
+CONFIG_STMMAC_ETH=y
+CONFIG_SYNOPSYS_DWC_ETH_QOS=y
+CONFIG_TI_CPSW=y
+CONFIG_XILINX_EMACLITE=y
+CONFIG_AT803X_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_FIXED_PHY=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_RTL8152=m
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_BRCMFMAC=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_QT1070=m
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_TEGRA=y
+CONFIG_KEYBOARD_SPEAR=y
+CONFIG_KEYBOARD_ST_KEYSCAN=y
+CONFIG_KEYBOARD_CROS_EC=m
+CONFIG_KEYBOARD_SAMSUNG=m
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_CYAPA=m
+CONFIG_MOUSE_ELAN_I2C=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_TOUCHSCREEN_MMS114=m
+CONFIG_TOUCHSCREEN_ST1232=m
+CONFIG_TOUCHSCREEN_STMPE=y
+CONFIG_TOUCHSCREEN_SUN4I=y
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_MAX77693_HAPTIC=m
+CONFIG_INPUT_MAX8997_HAPTIC=m
+CONFIG_INPUT_MPU3050=y
+CONFIG_INPUT_AXP20X_PEK=m
+CONFIG_INPUT_ADXL34X=m
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_EM=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_TTYAT=y
+CONFIG_SERIAL_BCM63XX=y
+CONFIG_SERIAL_BCM63XX_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_SIRFSOC=y
+CONFIG_SERIAL_SIRFSOC_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_IMX=y
+CONFIG_SERIAL_IMX_CONSOLE=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=20
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_VT8500=y
+CONFIG_SERIAL_VT8500_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_OMAP=y
+CONFIG_SERIAL_OMAP_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_FSL_LPUART=y
+CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
+CONFIG_SERIAL_CONEXANT_DIGICOLOR=y
+CONFIG_SERIAL_CONEXANT_DIGICOLOR_CONSOLE=y
+CONFIG_SERIAL_ST_ASC=y
+CONFIG_SERIAL_ST_ASC_CONSOLE=y
+CONFIG_HVC_DRIVER=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DAVINCI=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_ARB_GPIO_CHALLENGE=m
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_MUX_PINCTRL=y
+CONFIG_I2C_DEMUX_PINCTRL=y
+CONFIG_I2C_AT91=m
+CONFIG_I2C_BCM2835=y
+CONFIG_I2C_CADENCE=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_DIGICOLOR=m
+CONFIG_I2C_EMEV2=m
+CONFIG_I2C_GPIO=m
+CONFIG_I2C_EXYNOS5=y
+CONFIG_I2C_IMX=m
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_RIIC=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_S3C2410=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_SIRF=y
+CONFIG_I2C_ST=y
+CONFIG_I2C_SUN6I_P2WI=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_XILINX=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=m
+CONFIG_I2C_SLAVE_EEPROM=y
+CONFIG_SPI=y
+CONFIG_SPI_ATMEL=m
+CONFIG_SPI_BCM2835=y
+CONFIG_SPI_BCM2835AUX=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_DAVINCI=y
+CONFIG_SPI_GPIO=m
+CONFIG_SPI_FSL_DSPI=m
+CONFIG_SPI_OMAP24XX=y
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_ROCKCHIP=m
+CONFIG_SPI_RSPI=y
+CONFIG_SPI_S3C64XX=m
+CONFIG_SPI_SH_MSIOF=m
+CONFIG_SPI_SH_HSPI=y
+CONFIG_SPI_SIRF=y
+CONFIG_SPI_SUN4I=y
+CONFIG_SPI_SUN6I=y
+CONFIG_SPI_TEGRA114=y
+CONFIG_SPI_TEGRA20_SFLASH=y
+CONFIG_SPI_TEGRA20_SLINK=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_AS3722=y
+CONFIG_PINCTRL_PALMAS=y
+CONFIG_PINCTRL_BCM2835=y
+CONFIG_PINCTRL_APQ8064=y
+CONFIG_PINCTRL_APQ8084=y
+CONFIG_PINCTRL_IPQ8064=y
+CONFIG_PINCTRL_MSM8660=y
+CONFIG_PINCTRL_MSM8960=y
+CONFIG_PINCTRL_MSM8X74=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+CONFIG_GPIO_DAVINCI=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_EM=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_XILINX=y
+CONFIG_GPIO_ZYNQ=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PCF857X=y
+CONFIG_GPIO_TWL4030=y
+CONFIG_GPIO_PALMAS=y
+CONFIG_GPIO_SYSCON=y
+CONFIG_GPIO_TPS6586X=y
+CONFIG_GPIO_TPS65910=y
+CONFIG_BATTERY_ACT8945A=y
+CONFIG_BATTERY_SBS=y
+CONFIG_BATTERY_MAX17040=m
+CONFIG_BATTERY_MAX17042=m
+CONFIG_CHARGER_MAX14577=m
+CONFIG_CHARGER_MAX77693=m
+CONFIG_CHARGER_MAX8997=m
+CONFIG_CHARGER_MAX8998=m
+CONFIG_CHARGER_TPS65090=y
+CONFIG_AXP20X_POWER=m
+CONFIG_POWER_RESET_AS3722=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_KEYSTONE=y
+CONFIG_POWER_RESET_RMOBILE=y
+CONFIG_POWER_RESET_ST=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_SENSORS_LM90=y
+CONFIG_SENSORS_LM95245=y
+CONFIG_SENSORS_NTC_THERMISTOR=m
+CONFIG_SENSORS_PWM_FAN=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_CPU_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=y
+CONFIG_RCAR_THERMAL=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_DAVINCI_WATCHDOG=m
+CONFIG_EXYNOS_THERMAL=m
+CONFIG_ST_THERMAL_SYSCFG=y
+CONFIG_ST_THERMAL_MEMMAP=y
+CONFIG_WATCHDOG=y
+CONFIG_DA9063_WATCHDOG=m
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_ARM_SP805_WATCHDOG=y
+CONFIG_AT91SAM9X_WATCHDOG=y
+CONFIG_SAMA5D4_WATCHDOG=y
+CONFIG_ORION_WATCHDOG=y
+CONFIG_ST_LPC_WATCHDOG=y
+CONFIG_SUNXI_WATCHDOG=y
+CONFIG_IMX2_WDT=y
+CONFIG_TEGRA_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=y
+CONFIG_DW_WATCHDOG=y
+CONFIG_DIGICOLOR_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_BCM7038_WDT=m
+CONFIG_BCM_KONA_WDT=y
+CONFIG_MFD_ACT8945A=y
+CONFIG_MFD_AS3711=y
+CONFIG_MFD_AS3722=y
+CONFIG_MFD_ATMEL_FLEXCOM=y
+CONFIG_MFD_ATMEL_HLCDC=m
+CONFIG_MFD_BCM590XX=y
+CONFIG_MFD_AXP20X=y
+CONFIG_MFD_AXP20X_I2C=m
+CONFIG_MFD_AXP20X_RSB=m
+CONFIG_MFD_CROS_EC=m
+CONFIG_MFD_CROS_EC_I2C=m
+CONFIG_MFD_CROS_EC_SPI=m
+CONFIG_MFD_DA9063=m
+CONFIG_MFD_MAX14577=y
+CONFIG_MFD_MAX77686=y
+CONFIG_MFD_MAX77693=m
+CONFIG_MFD_MAX8907=y
+CONFIG_MFD_MAX8997=y
+CONFIG_MFD_MAX8998=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_PM8921_CORE=y
+CONFIG_MFD_QCOM_RPM=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_STMPE=y
+CONFIG_MFD_PALMAS=y
+CONFIG_MFD_TPS65090=y
+CONFIG_MFD_TPS65217=y
+CONFIG_MFD_TPS65218=y
+CONFIG_MFD_TPS6586X=y
+CONFIG_MFD_TPS65910=y
+CONFIG_REGULATOR_ACT8945A=y
+CONFIG_REGULATOR_AB8500=y
+CONFIG_REGULATOR_ACT8865=y
+CONFIG_REGULATOR_ANATOP=y
+CONFIG_REGULATOR_AS3711=y
+CONFIG_REGULATOR_AS3722=y
+CONFIG_REGULATOR_AXP20X=m
+CONFIG_REGULATOR_BCM590XX=y
+CONFIG_REGULATOR_DA9210=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_MFD_SYSCON=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_REGULATOR_LP872X=y
+CONFIG_REGULATOR_MAX14577=m
+CONFIG_REGULATOR_MAX8907=y
+CONFIG_REGULATOR_MAX8973=y
+CONFIG_REGULATOR_MAX8997=m
+CONFIG_REGULATOR_MAX8998=m
+CONFIG_REGULATOR_MAX77686=y
+CONFIG_REGULATOR_MAX77693=m
+CONFIG_REGULATOR_MAX77802=m
+CONFIG_REGULATOR_PALMAS=y
+CONFIG_REGULATOR_PBIAS=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_RPM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_REGULATOR_S5M8767=y
+CONFIG_REGULATOR_TI_ABB=y
+CONFIG_REGULATOR_TPS51632=y
+CONFIG_REGULATOR_TPS62360=y
+CONFIG_REGULATOR_TPS65090=y
+CONFIG_REGULATOR_TPS65217=y
+CONFIG_REGULATOR_TPS65218=y
+CONFIG_REGULATOR_TPS6586X=y
+CONFIG_REGULATOR_TPS65910=y
+CONFIG_REGULATOR_TWL4030=y
+CONFIG_REGULATOR_VEXPRESS=y
+CONFIG_REGULATOR_WM8994=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_GSPCA=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=m
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_VIDEO_ATMEL_ISI=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m
+CONFIG_VIDEO_S5P_FIMC=m
+CONFIG_VIDEO_S5P_MIPI_CSIS=m
+CONFIG_VIDEO_EXYNOS_FIMC_LITE=m
+CONFIG_VIDEO_EXYNOS4_FIMC_IS=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_STI_BDISP=m
+CONFIG_VIDEO_RENESAS_JPU=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_V4L_TEST_DRIVERS=y
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_VIDEO_ADV7180=m
+CONFIG_VIDEO_ML86V7667=m
+CONFIG_DRM=y
+CONFIG_DRM_I2C_ADV7511=m
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS_FIMD=y
+CONFIG_DRM_EXYNOS_MIXER=y
+CONFIG_DRM_EXYNOS_DPI=y
+CONFIG_DRM_EXYNOS_DSI=y
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=m
+CONFIG_ROCKCHIP_DW_HDMI=m
+CONFIG_ROCKCHIP_DW_MIPI_DSI=m
+CONFIG_ROCKCHIP_INNO_HDMI=m
+CONFIG_DRM_ATMEL_HLCDC=m
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_HDMI=y
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_SUN4I=m
+CONFIG_DRM_TEGRA=y
+CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_STI=m
+CONFIG_DRM_VC4=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_EFI=y
+CONFIG_FB_WM8505=y
+CONFIG_FB_SH_MOBILE_LCDC=y
+CONFIG_FB_SIMPLE=y
+CONFIG_FB_SH_MOBILE_MERAM=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=m
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_AS3711=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_HDA_TEGRA=m
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=m
+CONFIG_SND_HDA_CODEC_HDMI=m
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_SOC=m
+CONFIG_SND_ATMEL_SOC=m
+CONFIG_SND_ATMEL_SOC_WM8904=m
+CONFIG_SND_ATMEL_SOC_PDMIC=m
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_FSL_SAI=m
+CONFIG_SND_SOC_ROCKCHIP=m
+CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
+CONFIG_SND_SOC_ROCKCHIP_MAX98090=m
+CONFIG_SND_SOC_ROCKCHIP_RT5645=m
+CONFIG_SND_SOC_SAMSUNG=m
+CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994=m
+CONFIG_SND_SOC_SMDK_WM8994_PCM=m
+CONFIG_SND_SOC_SNOW=m
+CONFIG_SND_SOC_SH4_FSI=m
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_RSRC_CARD=m
+CONFIG_SND_SUN4I_CODEC=m
+CONFIG_SND_SOC_TEGRA=m
+CONFIG_SND_SOC_TEGRA_RT5640=m
+CONFIG_SND_SOC_TEGRA_WM8753=m
+CONFIG_SND_SOC_TEGRA_WM8903=m
+CONFIG_SND_SOC_TEGRA_WM9712=m
+CONFIG_SND_SOC_TEGRA_TRIMSLICE=m
+CONFIG_SND_SOC_TEGRA_ALC5632=m
+CONFIG_SND_SOC_TEGRA_MAX98090=m
+CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_SGTL5000=m
+CONFIG_SND_SOC_SPDIF=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_MVEBU=y
+CONFIG_USB_XHCI_RCAR=m
+CONFIG_USB_XHCI_TEGRA=m
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=m
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_TEGRA=y
+CONFIG_USB_EHCI_HCD_STI=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_STI=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_EXYNOS=m
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=m
+CONFIG_USB_MUSB_SUNXI=m
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_AB8500_USB=y
+CONFIG_KEYSTONE_USB_PHY=y
+CONFIG_OMAP_USB3=y
+CONFIG_USB_GPIO_VBUS=y
+CONFIG_USB_ISP1301=y
+CONFIG_USB_MSM_OTG=m
+CONFIG_USB_MXS_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_FSL_USB2=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_ETH=m
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_AT91=y
+CONFIG_MMC_SDHCI_OF_ESDHC=m
+CONFIG_MMC_SDHCI_ESDHC_IMX=y
+CONFIG_MMC_SDHCI_DOVE=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_PXAV3=y
+CONFIG_MMC_SDHCI_SPEAR=y
+CONFIG_MMC_SDHCI_S3C=y
+CONFIG_MMC_SDHCI_S3C_DMA=y
+CONFIG_MMC_SDHCI_BCM_KONA=y
+CONFIG_MMC_SDHCI_ST=y
+CONFIG_MMC_OMAP=y
+CONFIG_MMC_OMAP_HS=y
+CONFIG_MMC_ATMELMCI=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_MVSDIO=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+CONFIG_MMC_DW_PLTFM=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SH_MMCIF=y
+CONFIG_MMC_SUNXI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=m
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_MAX77693=m
+CONFIG_LEDS_MAX8997=m
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_HIGHBANK_MC=y
+CONFIG_EDAC_HIGHBANK_L2=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AS3722=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_HYM8563=m
+CONFIG_RTC_DRV_MAX8907=y
+CONFIG_RTC_DRV_MAX8998=m
+CONFIG_RTC_DRV_MAX8997=m
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_RS5C372=m
+CONFIG_RTC_DRV_PALMAS=y
+CONFIG_RTC_DRV_ST_LPC=y
+CONFIG_RTC_DRV_TWL4030=y
+CONFIG_RTC_DRV_TPS6586X=y
+CONFIG_RTC_DRV_TPS65910=y
+CONFIG_RTC_DRV_S35390A=m
+CONFIG_RTC_DRV_RX8581=m
+CONFIG_RTC_DRV_EM3027=y
+CONFIG_RTC_DRV_DA9063=m
+CONFIG_RTC_DRV_EFI=m
+CONFIG_RTC_DRV_DIGICOLOR=m
+CONFIG_RTC_DRV_S5M=m
+CONFIG_RTC_DRV_S3C=m
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_AT91RM9200=m
+CONFIG_RTC_DRV_AT91SAM9=m
+CONFIG_RTC_DRV_VT8500=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_SUNXI=y
+CONFIG_RTC_DRV_MV=y
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=y
+CONFIG_AT_HDMAC=y
+CONFIG_AT_XDMAC=y
+CONFIG_FSL_EDMA=y
+CONFIG_MV_XOR=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_SH_DMAE=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_STE_DMA40=y
+CONFIG_SIRF_DMA=y
+CONFIG_TI_EDMA=y
+CONFIG_PL330_DMA=y
+CONFIG_IMX_SDMA=y
+CONFIG_IMX_DMA=y
+CONFIG_MXS_DMA=y
+CONFIG_DMA_BCM2835=y
+CONFIG_DMA_OMAP=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_XILINX_DMA=y
+CONFIG_DMA_SUN6I=y
+CONFIG_STAGING=y
+CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
+CONFIG_MFD_NVEC=y
+CONFIG_KEYBOARD_NVEC=y
+CONFIG_SERIO_NVEC_PS2=y
+CONFIG_NVEC_POWER=y
+CONFIG_NVEC_PAZ00=y
+CONFIG_QCOM_GSBI=y
+CONFIG_QCOM_PM=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_WCNSS_CTRL=m
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_CHROME_PLATFORMS=y
+CONFIG_STAGING_BOARD=y
+CONFIG_CROS_EC_CHARDEV=m
+CONFIG_COMMON_CLK_MAX77686=y
+CONFIG_COMMON_CLK_MAX77802=m
+CONFIG_COMMON_CLK_RK808=m
+CONFIG_COMMON_CLK_S2MPS11=m
+CONFIG_APQ_MMCC_8084=y
+CONFIG_MSM_GCC_8660=y
+CONFIG_MSM_MMCC_8960=y
+CONFIG_MSM_MMCC_8974=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_GART=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_ARM_TEGRA_DEVFREQ=m
+CONFIG_MEMORY=y
+CONFIG_EXTCON=y
+CONFIG_TI_AEMIF=y
+CONFIG_IIO=y
+CONFIG_AT91_ADC=m
+CONFIG_AT91_SAMA5D2_ADC=m
+CONFIG_BERLIN2_ADC=m
+CONFIG_EXYNOS_ADC=m
+CONFIG_VF610_ADC=m
+CONFIG_XILINX_XADC=y
+CONFIG_CM36651=m
+CONFIG_AK8975=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_PWM=y
+CONFIG_PWM_ATMEL=m
+CONFIG_PWM_ATMEL_HLCDC_PWM=m
+CONFIG_PWM_ATMEL_TCB=m
+CONFIG_PWM_FSL_FTM=m
+CONFIG_PWM_RENESAS_TPU=y
+CONFIG_PWM_ROCKCHIP=m
+CONFIG_PWM_SAMSUNG=m
+CONFIG_PWM_SUN4I=y
+CONFIG_PWM_TEGRA=y
+CONFIG_PWM_VT8500=y
+CONFIG_PHY_HIX5HD2_SATA=y
+CONFIG_E1000E=y
+CONFIG_PWM_STI=y
+CONFIG_PWM_BCM2835=y
+CONFIG_PWM_BRCMSTB=m
+CONFIG_OMAP_USB2=y
+CONFIG_TI_PIPE3=y
+CONFIG_PHY_BERLIN_USB=y
+CONFIG_PHY_BERLIN_SATA=y
+CONFIG_PHY_ROCKCHIP_DP=m
+CONFIG_PHY_ROCKCHIP_USB=m
+CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_MIPHY28LP=y
+CONFIG_PHY_MIPHY365X=y
+CONFIG_PHY_RCAR_GEN2=m
+CONFIG_PHY_STIH41X_USB=y
+CONFIG_PHY_STIH407_USB=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_SUN9I_USB=y
+CONFIG_PHY_SAMSUNG_USB2=m
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_PHY_BRCM_SATA=y
+CONFIG_NVMEM=y
+CONFIG_NVMEM_SUNXI_SID=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_VARS=m
+CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_EXT4_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_CRYPTO_DEV_TEGRA_AES=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_KEYSTONE_IRQ=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_ST=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=m
+CONFIG_CRYPTO_DEV_S5P=m
+CONFIG_CRYPTO_DEV_SUN4I_SS=m
+CONFIG_CRYPTO_DEV_ROCKCHIP=m
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM=m
+CONFIG_CRYPTO_SHA1_ARM_NEON=m
+CONFIG_CRYPTO_SHA1_ARM_CE=m
+CONFIG_CRYPTO_SHA2_ARM_CE=m
+CONFIG_CRYPTO_SHA256_ARM=m
+CONFIG_CRYPTO_SHA512_ARM=m
+CONFIG_CRYPTO_AES_ARM=m
+CONFIG_CRYPTO_AES_ARM_BS=m
+CONFIG_CRYPTO_AES_ARM_CE=m
+CONFIG_CRYPTO_GHASH_ARM_CE=m
+CONFIG_CRYPTO_DEV_ATMEL_AES=m
+CONFIG_CRYPTO_DEV_ATMEL_TDES=m
+CONFIG_CRYPTO_DEV_ATMEL_SHA=m
+CONFIG_VIDEO_VIVID=m
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6795368ad023..cc414382dab4 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -128,20 +128,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#endif /* !SMP */
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
#ifndef CONFIG_SMP
preempt_disable();
#endif
@@ -172,17 +162,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
preempt_enable();
#endif
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
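
The futex hunk above is the 4.9 backport of the arch_futex_atomic_op_inuser() interface change: the per-architecture helper now only performs the atomic operation and hands the old value back through *oval, while decoding of encoded_op, the access_ok() check and the final comparison move into common code. A minimal sketch of that common caller, reconstructed from the logic removed above (the real generic implementation may differ in detail):

	static int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
	{
		int op = (encoded_op >> 28) & 7;
		int cmp = (encoded_op >> 24) & 15;
		int oparg = (encoded_op << 8) >> 20;
		int cmparg = (encoded_op << 20) >> 20;
		int oldval, ret;

		if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
			oparg = 1 << oparg;

		if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
			return -EFAULT;

		/* Arch hook from the hunk above: do the atomic op, report the old value. */
		ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
		if (ret)
			return ret;

		switch (cmp) {
		case FUTEX_OP_CMP_EQ: return oldval == cmparg;
		case FUTEX_OP_CMP_NE: return oldval != cmparg;
		case FUTEX_OP_CMP_LT: return oldval < cmparg;
		case FUTEX_OP_CMP_GE: return oldval >= cmparg;
		case FUTEX_OP_CMP_LE: return oldval <= cmparg;
		case FUTEX_OP_CMP_GT: return oldval > cmparg;
		default: return -ENOSYS;
		}
	}
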
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d5423ab15ed5..f4dab20ac9f3 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -78,6 +78,9 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
int max_vcpus;
+
+ /* Mandated version of PSCI */
+ u32 psci_version;
};
#define KVM_NR_MEM_OBJS 40
@@ -318,4 +321,10 @@ static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
return -ENXIO;
}
+static inline bool kvm_arm_harden_branch_predictor(void)
+{
+ /* No way to detect it yet, pretend it is not there. */
+ return false;
+}
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index a58bbaa3ec60..d10e36235438 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -223,6 +223,16 @@ static inline unsigned int kvm_get_vmid_bits(void)
return 8;
}
+static inline void *kvm_get_hyp_vector(void)
+{
+ return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+ return 0;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
deleted file mode 100644
index 6bda945d31fa..000000000000
--- a/arch/arm/include/asm/kvm_psci.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2012 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM_KVM_PSCI_H__
-#define __ARM_KVM_PSCI_H__
-
-#define KVM_ARM_PSCI_0_1 1
-#define KVM_ARM_PSCI_0_2 2
-
-int kvm_psci_version(struct kvm_vcpu *vcpu);
-int kvm_psci_call(struct kvm_vcpu *vcpu);
-
-#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 71e473d05fcc..620dc75362e5 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -16,7 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr);
}
-#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\
atomic64_t, \
counter), (val))
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index b38c10c73579..0b8cf31d8416 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -173,6 +173,12 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_VFP_FPINST 0x1009
#define KVM_REG_ARM_VFP_FPINST2 0x100A
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
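
The new KVM_REG_ARM_FW coprocessor space exposes one pseudo-register, KVM_REG_ARM_PSCI_VERSION, through the usual KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls: a VMM can read it to learn which PSCI revision the guest will see, or write it to pin an older revision for migration compatibility. A hedged userspace sketch (vcpu_fd is assumed to come from a prior KVM_CREATE_VCPU call):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read the PSCI version this VCPU will offer to the guest. */
	static int get_psci_version(int vcpu_fd, uint64_t *version)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_ARM_PSCI_VERSION,	/* defined in the hunk above */
			.addr = (uintptr_t)version,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}
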
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 3f1759411d51..414e60ed0257 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -29,11 +29,6 @@
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
-#ifdef CONFIG_OLD_MCOUNT
-#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
-#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
-
-#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
static int __ftrace_modify_code(void *data)
{
@@ -51,6 +46,12 @@ void arch_ftrace_update_code(int command)
stop_machine(__ftrace_modify_code, &command, NULL);
}
+#ifdef CONFIG_OLD_MCOUNT
+#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
+#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
+
+#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
+
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
return rec->arch.old_mcount ? OLD_NOP : NOP;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index b09b0cd818a2..cf7456a22a73 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -29,6 +29,7 @@
#include <linux/kvm.h>
#include <trace/events/kvm.h>
#include <kvm/arm_pmu.h>
+#include <kvm/arm_psci.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -44,7 +45,6 @@
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
-#include <asm/kvm_psci.h>
#include <asm/sections.h>
#ifdef REQUIRES_VIRT
@@ -1088,7 +1088,7 @@ static void cpu_init_hyp_mode(void *dummy)
pgd_ptr = kvm_mmu_get_httbr();
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
- vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
+ vector_ptr = (unsigned long)kvm_get_hyp_vector();
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
@@ -1345,6 +1345,13 @@ static int init_hyp_mode(void)
goto out_err;
}
+
+ err = kvm_map_vectors();
+ if (err) {
+ kvm_err("Cannot map vectors\n");
+ goto out_err;
+ }
+
/*
* Map the Hyp stack pages
*/
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 9aca92074f85..630117d2e8bd 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
@@ -176,6 +177,7 @@ static unsigned long num_core_regs(void)
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+ + kvm_arm_get_fw_num_regs(vcpu)
+ NUM_TIMER_REGS;
}
@@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices++;
}
+ ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+ if (ret)
+ return ret;
+ uindices += kvm_arm_get_fw_num_regs(vcpu);
+
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
@@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+ return kvm_arm_get_fw_reg(vcpu, reg);
+
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+ return kvm_arm_set_fw_reg(vcpu, reg);
+
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4e57ebca6e69..de1aedce2a8b 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -21,7 +21,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
-#include <asm/kvm_psci.h>
+#include <kvm/arm_psci.h>
#include <trace/events/kvm.h>
#include "trace.h"
@@ -36,7 +36,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_vcpu_hvc_get_imm(vcpu));
vcpu->stat.hvc_exit_stat++;
- ret = kvm_psci_call(vcpu);
+ ret = kvm_hvc_call_handler(vcpu);
if (ret < 0) {
vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
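
With this change an HVC from the guest is first offered to the SMCCC dispatcher, kvm_hvc_call_handler() (added in the psci.c hunk further down), and only function IDs it does not recognise fall back to plain PSCI. A guest-side sketch of probing the hypervisor, assuming the guest kernel carries the SMCCC 1.1 definitions from <linux/arm-smccc.h> that this series depends on:

	#include <linux/types.h>
	#include <linux/arm-smccc.h>

	/* Returns true if the hypervisor answers the SMCCC v1.1 version query. */
	static bool hyp_has_smccc_1_1(void)
	{
		struct arm_smccc_res res;

		arm_smccc_hvc(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res);
		/* Older, PSCI-only hypervisors return PSCI_RET_NOT_SUPPORTED here. */
		return res.a0 == ARM_SMCCC_VERSION_1_1;
	}
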
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 92eab1d51785..61049216e4d5 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -6,6 +6,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
+CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
+
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -14,7 +16,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
+
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
index 111bda8cdebd..be4b8b0a40ad 100644
--- a/arch/arm/kvm/hyp/banked-sr.c
+++ b/arch/arm/kvm/hyp/banked-sr.c
@@ -20,6 +20,10 @@
#include <asm/kvm_hyp.h>
+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
__asm__(".arch_extension virt");
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 624a510d31df..ebd2dd46adf7 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -237,8 +237,10 @@ void __hyp_text __noreturn __hyp_panic(int cause)
vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ __timer_save_state(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
+ __banked_restore_state(host_ctxt);
__sysreg_restore_state(host_ctxt);
}
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 2a35c1963f6d..7f868d9bb5ed 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1284,7 +1284,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- if (vma_kernel_pagesize(vma) && !logging_active) {
+ if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
hugetlb = true;
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
} else {
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index a08d7a93aebb..8a9c654f4f87 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -15,16 +15,17 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
#include <linux/wait.h>
#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
-#include <asm/kvm_psci.h>
#include <asm/kvm_host.h>
-#include <uapi/linux/psci.h>
+#include <kvm/arm_psci.h>
/*
* This is an implementation of the Power State Coordination Interface
@@ -33,6 +34,38 @@
#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
+static u32 smccc_get_function(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 0);
+}
+
+static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 1);
+}
+
+static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 2);
+}
+
+static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 3);
+}
+
+static void smccc_set_retval(struct kvm_vcpu *vcpu,
+ unsigned long a0,
+ unsigned long a1,
+ unsigned long a2,
+ unsigned long a3)
+{
+ vcpu_set_reg(vcpu, 0, a0);
+ vcpu_set_reg(vcpu, 1, a1);
+ vcpu_set_reg(vcpu, 2, a2);
+ vcpu_set_reg(vcpu, 3, a3);
+}
+
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
if (affinity_level <= 3)
@@ -75,7 +108,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
unsigned long context_id;
phys_addr_t target_pc;
- cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+ cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
@@ -88,14 +121,14 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
if (!vcpu->arch.power_off) {
- if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
+ if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
else
return PSCI_RET_INVALID_PARAMS;
}
- target_pc = vcpu_get_reg(source_vcpu, 2);
- context_id = vcpu_get_reg(source_vcpu, 3);
+ target_pc = smccc_get_arg2(source_vcpu);
+ context_id = smccc_get_arg3(source_vcpu);
kvm_reset_vcpu(vcpu);
@@ -114,7 +147,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
* NOTE: We always update r0 (or x0) because for PSCI v0.1
* the general purpose registers are undefined upon CPU_ON.
*/
- vcpu_set_reg(vcpu, 0, context_id);
+ smccc_set_retval(vcpu, context_id, 0, 0, 0);
vcpu->arch.power_off = false;
smp_mb(); /* Make sure the above is visible */
@@ -134,8 +167,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
- target_affinity = vcpu_get_reg(vcpu, 1);
- lowest_affinity_level = vcpu_get_reg(vcpu, 2);
+ target_affinity = smccc_get_arg1(vcpu);
+ lowest_affinity_level = smccc_get_arg2(vcpu);
/* Determine target affinity mask */
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -198,18 +231,10 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}
-int kvm_psci_version(struct kvm_vcpu *vcpu)
-{
- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
- return KVM_ARM_PSCI_0_2;
-
- return KVM_ARM_PSCI_0_1;
-}
-
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+ unsigned long psci_fn = smccc_get_function(vcpu);
unsigned long val;
int ret = 1;
@@ -219,7 +244,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
* Bits[31:16] = Major Version = 0
* Bits[15:0] = Minor Version = 2
*/
- val = 2;
+ val = KVM_ARM_PSCI_0_2;
break;
case PSCI_0_2_FN_CPU_SUSPEND:
case PSCI_0_2_FN64_CPU_SUSPEND:
@@ -276,14 +301,56 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
break;
}
- vcpu_set_reg(vcpu, 0, val);
+ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return ret;
+}
+
+static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
+{
+ u32 psci_fn = smccc_get_function(vcpu);
+ u32 feature;
+ unsigned long val;
+ int ret = 1;
+
+ switch(psci_fn) {
+ case PSCI_0_2_FN_PSCI_VERSION:
+ val = KVM_ARM_PSCI_1_0;
+ break;
+ case PSCI_1_0_FN_PSCI_FEATURES:
+ feature = smccc_get_arg1(vcpu);
+ switch(feature) {
+ case PSCI_0_2_FN_PSCI_VERSION:
+ case PSCI_0_2_FN_CPU_SUSPEND:
+ case PSCI_0_2_FN64_CPU_SUSPEND:
+ case PSCI_0_2_FN_CPU_OFF:
+ case PSCI_0_2_FN_CPU_ON:
+ case PSCI_0_2_FN64_CPU_ON:
+ case PSCI_0_2_FN_AFFINITY_INFO:
+ case PSCI_0_2_FN64_AFFINITY_INFO:
+ case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ case PSCI_0_2_FN_SYSTEM_OFF:
+ case PSCI_0_2_FN_SYSTEM_RESET:
+ case PSCI_1_0_FN_PSCI_FEATURES:
+ case ARM_SMCCC_VERSION_FUNC_ID:
+ val = 0;
+ break;
+ default:
+ val = PSCI_RET_NOT_SUPPORTED;
+ break;
+ }
+ break;
+ default:
+ return kvm_psci_0_2_call(vcpu);
+ }
+
+ smccc_set_retval(vcpu, val, 0, 0, 0);
return ret;
}
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+ unsigned long psci_fn = smccc_get_function(vcpu);
unsigned long val;
switch (psci_fn) {
@@ -301,7 +368,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
break;
}
- vcpu_set_reg(vcpu, 0, val);
+ smccc_set_retval(vcpu, val, 0, 0, 0);
return 1;
}
@@ -319,9 +386,11 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
* Errors:
* -EINVAL: Unrecognized PSCI function
*/
-int kvm_psci_call(struct kvm_vcpu *vcpu)
+static int kvm_psci_call(struct kvm_vcpu *vcpu)
{
- switch (kvm_psci_version(vcpu)) {
+ switch (kvm_psci_version(vcpu, vcpu->kvm)) {
+ case KVM_ARM_PSCI_1_0:
+ return kvm_psci_1_0_call(vcpu);
case KVM_ARM_PSCI_0_2:
return kvm_psci_0_2_call(vcpu);
case KVM_ARM_PSCI_0_1:
@@ -330,3 +399,89 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
return -EINVAL;
};
}
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+{
+ u32 func_id = smccc_get_function(vcpu);
+ u32 val = PSCI_RET_NOT_SUPPORTED;
+ u32 feature;
+
+ switch (func_id) {
+ case ARM_SMCCC_VERSION_FUNC_ID:
+ val = ARM_SMCCC_VERSION_1_1;
+ break;
+ case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
+ feature = smccc_get_arg1(vcpu);
+ switch(feature) {
+ case ARM_SMCCC_ARCH_WORKAROUND_1:
+ if (kvm_arm_harden_branch_predictor())
+ val = 0;
+ break;
+ }
+ break;
+ default:
+ return kvm_psci_call(vcpu);
+ }
+
+ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return 1;
+}
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
+{
+ return 1; /* PSCI version */
+}
+
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
+ return -EFAULT;
+
+ return 0;
+}
+
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+
+ val = kvm_psci_version(vcpu, vcpu->kvm);
+ if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ bool wants_02;
+ u64 val;
+
+ if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+
+ switch (val) {
+ case KVM_ARM_PSCI_0_1:
+ if (wants_02)
+ return -EINVAL;
+ vcpu->kvm->arch.psci_version = val;
+ return 0;
+ case KVM_ARM_PSCI_0_2:
+ case KVM_ARM_PSCI_1_0:
+ if (!wants_02)
+ return -EINVAL;
+ vcpu->kvm->arch.psci_version = val;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
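
kvm_psci_version() now takes the struct kvm as well, and its definition moves out of this file into the shared include/kvm/arm_psci.h header, which is not part of this excerpt. Consistent with the kvm_arm_set_fw_reg() logic above, the lookup conceptually amounts to the sketch below (the exact upstream helper may be worded differently):

	static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
	{
		/*
		 * VCPUs created without the PSCI_0_2 feature stay on the legacy
		 * 0.1 interface; everyone else gets the per-VM version that
		 * userspace may have adjusted via KVM_REG_ARM_PSCI_VERSION.
		 */
		if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
			return kvm->arch.psci_version;

		return KVM_ARM_PSCI_0_1;
	}
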
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1712f132b80d..b83fdc06286a 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -85,7 +85,11 @@
.pushsection .text.fixup,"ax"
.align 4
9001: mov r4, #-EFAULT
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ ldr r5, [sp, #9*4] @ *err_ptr
+#else
ldr r5, [sp, #8*4] @ *err_ptr
+#endif
str r4, [r5]
ldmia sp, {r1, r2} @ retrieve dst, len
add r2, r2, r1
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index a0e66d8200c5..403db76e3497 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -199,6 +199,7 @@ config ARCH_BRCMSTB
select BRCMSTB_L2_IRQ
select BCM7120_L2_IRQ
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
+ select ZONE_DMA if ARM_LPAE
select SOC_BRCMSTB
select SOC_BUS
help
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index add3771d38f6..9a22d40602aa 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -821,6 +821,8 @@ static struct platform_device da8xx_dsp = {
.resource = da8xx_rproc_resources,
};
+static bool rproc_mem_inited __initdata;
+
#if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC)
static phys_addr_t rproc_base __initdata;
@@ -859,6 +861,8 @@ void __init da8xx_rproc_reserve_cma(void)
ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0);
if (ret)
pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret);
+ else
+ rproc_mem_inited = true;
}
#else
@@ -873,6 +877,12 @@ int __init da8xx_register_rproc(void)
{
int ret;
+ if (!rproc_mem_inited) {
+ pr_warn("%s: memory not reserved for DSP, not registering DSP device\n",
+ __func__);
+ return -ENOMEM;
+ }
+
ret = platform_device_register(&da8xx_dsp);
if (ret)
pr_err("%s: can't register DSP device: %d\n", __func__, ret);
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index b3347d32349f..94906ed49392 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -131,6 +131,9 @@ struct device * __init imx_soc_device_init(void)
case MXC_CPU_IMX6UL:
soc_id = "i.MX6UL";
break;
+ case MXC_CPU_IMX6ULL:
+ soc_id = "i.MX6ULL";
+ break;
case MXC_CPU_IMX7D:
soc_id = "i.MX7D";
break;
diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h
index 34f2ff62583c..e00d6260c3df 100644
--- a/arch/arm/mach-imx/mxc.h
+++ b/arch/arm/mach-imx/mxc.h
@@ -39,6 +39,7 @@
#define MXC_CPU_IMX6SX 0x62
#define MXC_CPU_IMX6Q 0x63
#define MXC_CPU_IMX6UL 0x64
+#define MXC_CPU_IMX6ULL 0x65
#define MXC_CPU_IMX7D 0x72
#define IMX_DDR_TYPE_LPDDR2 1
@@ -73,6 +74,11 @@ static inline bool cpu_is_imx6ul(void)
return __mxc_cpu_type == MXC_CPU_IMX6UL;
}
+static inline bool cpu_is_imx6ull(void)
+{
+ return __mxc_cpu_type == MXC_CPU_IMX6ULL;
+}
+
static inline bool cpu_is_imx6q(void)
{
return __mxc_cpu_type == MXC_CPU_IMX6Q;
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 541647f57192..895c0746fe50 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -42,7 +42,7 @@ config MACH_ARMADA_375
depends on ARCH_MULTI_V7
select ARMADA_370_XP_IRQ
select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_GIC
select ARMADA_375_CLK
select HAVE_ARM_SCU
@@ -58,7 +58,7 @@ config MACH_ARMADA_38X
bool "Marvell Armada 380/385 boards"
depends on ARCH_MULTI_V7
select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_GIC
select ARMADA_370_XP_IRQ
select ARMADA_38X_CLK
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index ef9ed36e8a61..e3416b40e82b 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -524,7 +524,7 @@ static struct clockdomain pcie_7xx_clkdm = {
.dep_bit = DRA7XX_PCIE_STATDEP_SHIFT,
.wkdep_srcs = pcie_wkup_sleep_deps,
.sleepdep_srcs = pcie_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain atl_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
index 9ff92050053c..fa7f308c9027 100644
--- a/arch/arm/mach-omap2/omap-secure.c
+++ b/arch/arm/mach-omap2/omap-secure.c
@@ -73,6 +73,7 @@ phys_addr_t omap_secure_ram_mempool_base(void)
return omap_secure_memblock_base;
}
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
u32 omap3_save_secure_ram(void __iomem *addr, int size)
{
u32 ret;
@@ -91,6 +92,7 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size)
return ret;
}
+#endif
/**
* rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index da0b33deba6d..5629d7580973 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -648,7 +648,7 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
*/
static int vfp_dying_cpu(unsigned int cpu)
{
- vfp_force_reload(cpu, current_thread_info());
+ vfp_current_hw_state[cpu] = NULL;
return 0;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 86e3ab630aa6..7023e9bbc401 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -429,6 +429,20 @@ config ARM64_ERRATUM_843419
If unsure, say Y.
+config ARM64_ERRATUM_1024718
+ bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
+ default y
+ help
+ This option adds a workaround for Arm Cortex-A55 Erratum 1024718.
+
+ Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
+ update of the hardware dirty bit when the DBM/AP bits are updated
+ without a break-before-make. The workaround is to disable the usage
+ of hardware DBM locally on the affected cores. CPUs not affected by
+ this erratum will continue to use the feature.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -746,6 +760,35 @@ config FORCE_MAX_ZONEORDER
However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
4M allocations matching the default size used by generic code.
+config UNMAP_KERNEL_AT_EL0
+ bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+ default y
+ help
+ Speculation attacks against some high-performance processors can
+ be used to bypass MMU permission checks and leak kernel data to
+ userspace. This can be defended against by unmapping the kernel
+ when running in userspace, mapping it back in on exception entry
+ via a trampoline page in the vector table.
+
+ If unsure, say Y.
+
+config HARDEN_BRANCH_PREDICTOR
+ bool "Harden the branch predictor against aliasing attacks" if EXPERT
+ default y
+ help
+ Speculation attacks against some high-performance processors rely on
+ being able to manipulate the branch predictor for a victim context by
+ executing aliasing branches in the attacker context. Such attacks
+ can be partially mitigated against by clearing internal branch
+ predictor state and limiting the prediction logic in some situations.
+
+ This config option will take CPU-specific actions to harden the
+ branch predictor against aliasing attacks and may rely on specific
+ instruction sequences or control bits being set by the system
+ firmware.
+
+ If unsure, say Y.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index 9217da983525..53d03cb144e4 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -36,9 +36,8 @@
enable-method = "psci";
};
- L2_CA57: cache-controller@0 {
+ L2_CA57: cache-controller-0 {
compatible = "cache";
- reg = <0>;
power-domains = <&sysc R8A7796_PD_CA57_SCU>;
cache-unified;
cache-level = <2>;
diff --git a/arch/arm64/crypto/sha256-core.S b/arch/arm64/crypto/sha256-core.S
new file mode 100644
index 000000000000..3ce82cc860bc
--- /dev/null
+++ b/arch/arm64/crypto/sha256-core.S
@@ -0,0 +1,2061 @@
+// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License"). You may not use
+// this file except in compliance with the License. You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+
+// ====================================================================
+// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// project. The module is, however, dual licensed under OpenSSL and
+// CRYPTOGAMS licenses depending on where you obtain it. For further
+// details see http://www.openssl.org/~appro/cryptogams/.
+//
+// Permission to use under GPLv2 terms is granted.
+// ====================================================================
+//
+// SHA256/512 for ARMv8.
+//
+// Performance in cycles per processed byte and improvement coefficient
+// over code generated with "default" compiler:
+//
+// SHA256-hw SHA256(*) SHA512
+// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
+// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
+// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
+// Denver 2.01 10.5 (+26%) 6.70 (+8%)
+// X-Gene 20.0 (+100%) 12.8 (+300%(***))
+// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
+//
+// (*) Software SHA256 results are of lesser relevance, presented
+// mostly for informational purposes.
+// (**) The result is a trade-off: it's possible to improve it by
+// 10% (or by 1 cycle per round), but at the cost of 20% loss
+// on Cortex-A53 (or by 4 cycles per round).
+// (***) Super-impressive coefficients over gcc-generated code are
+// indication of some compiler "pathology", most notably code
+// generated with -mgeneral-regs-only is significantly faster
+// and the gap is only 40-90%.
+//
+// October 2016.
+//
+// Originally it was reckoned that it makes no sense to implement NEON
+// version of SHA256 for 64-bit processors. This is because performance
+// improvement on most wide-spread Cortex-A5x processors was observed
+// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
+// observed that 32-bit NEON SHA256 performs significantly better than
+// 64-bit scalar version on *some* of the more recent processors. As
+// result 64-bit NEON version of SHA256 was added to provide best
+// all-round performance. For example it executes ~30% faster on X-Gene
+// and Mongoose. [For reference, NEON version of SHA512 is bound to
+// deliver much less improvement, likely *negative* on Cortex-A5x.
+// Which is why NEON support is limited to SHA256.]
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#endif
+
+.text
+
+.extern OPENSSL_armcap_P
+.globl sha256_block_data_order
+.type sha256_block_data_order,%function
+.align 6
+sha256_block_data_order:
+#ifndef __KERNEL__
+# ifdef __ILP32__
+ ldrsw x16,.LOPENSSL_armcap_P
+# else
+ ldr x16,.LOPENSSL_armcap_P
+# endif
+ adr x17,.LOPENSSL_armcap_P
+ add x16,x16,x17
+ ldr w16,[x16]
+ tst w16,#ARMV8_SHA256
+ b.ne .Lv8_entry
+ tst w16,#ARMV7_NEON
+ b.ne .Lneon_entry
+#endif
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ sub sp,sp,#4*4
+
+ ldp w20,w21,[x0] // load context
+ ldp w22,w23,[x0,#2*4]
+ ldp w24,w25,[x0,#4*4]
+ add x2,x1,x2,lsl#6 // end of input
+ ldp w26,w27,[x0,#6*4]
+ adr x30,.LK256
+ stp x0,x2,[x29,#96]
+
+.Loop:
+ ldp w3,w4,[x1],#2*4
+ ldr w19,[x30],#4 // *K++
+ eor w28,w21,w22 // magic seed
+ str x1,[x29,#112]
+#ifndef __AARCH64EB__
+ rev w3,w3 // 0
+#endif
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ eor w6,w24,w24,ror#14
+ and w17,w25,w24
+ bic w19,w26,w24
+ add w27,w27,w3 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w6,ror#11 // Sigma1(e)
+ ror w6,w20,#2
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ eor w17,w20,w20,ror#9
+ add w27,w27,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w23,w23,w27 // d+=h
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w6,w17,ror#13 // Sigma0(a)
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w27,w27,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w4,w4 // 1
+#endif
+ ldp w5,w6,[x1],#2*4
+ add w27,w27,w17 // h+=Sigma0(a)
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ eor w7,w23,w23,ror#14
+ and w17,w24,w23
+ bic w28,w25,w23
+ add w26,w26,w4 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w7,ror#11 // Sigma1(e)
+ ror w7,w27,#2
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ eor w17,w27,w27,ror#9
+ add w26,w26,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w22,w22,w26 // d+=h
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w7,w17,ror#13 // Sigma0(a)
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w26,w26,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w5,w5 // 2
+#endif
+ add w26,w26,w17 // h+=Sigma0(a)
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ eor w8,w22,w22,ror#14
+ and w17,w23,w22
+ bic w19,w24,w22
+ add w25,w25,w5 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w8,ror#11 // Sigma1(e)
+ ror w8,w26,#2
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ eor w17,w26,w26,ror#9
+ add w25,w25,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w21,w21,w25 // d+=h
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w8,w17,ror#13 // Sigma0(a)
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w25,w25,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w6,w6 // 3
+#endif
+ ldp w7,w8,[x1],#2*4
+ add w25,w25,w17 // h+=Sigma0(a)
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ eor w9,w21,w21,ror#14
+ and w17,w22,w21
+ bic w28,w23,w21
+ add w24,w24,w6 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w9,ror#11 // Sigma1(e)
+ ror w9,w25,#2
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ eor w17,w25,w25,ror#9
+ add w24,w24,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w20,w20,w24 // d+=h
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w9,w17,ror#13 // Sigma0(a)
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w24,w24,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w7,w7 // 4
+#endif
+ add w24,w24,w17 // h+=Sigma0(a)
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ eor w10,w20,w20,ror#14
+ and w17,w21,w20
+ bic w19,w22,w20
+ add w23,w23,w7 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w10,ror#11 // Sigma1(e)
+ ror w10,w24,#2
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ eor w17,w24,w24,ror#9
+ add w23,w23,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w27,w27,w23 // d+=h
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w10,w17,ror#13 // Sigma0(a)
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w23,w23,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w8,w8 // 5
+#endif
+ ldp w9,w10,[x1],#2*4
+ add w23,w23,w17 // h+=Sigma0(a)
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ eor w11,w27,w27,ror#14
+ and w17,w20,w27
+ bic w28,w21,w27
+ add w22,w22,w8 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w11,ror#11 // Sigma1(e)
+ ror w11,w23,#2
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ eor w17,w23,w23,ror#9
+ add w22,w22,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w26,w26,w22 // d+=h
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w11,w17,ror#13 // Sigma0(a)
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w22,w22,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w9,w9 // 6
+#endif
+ add w22,w22,w17 // h+=Sigma0(a)
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ eor w12,w26,w26,ror#14
+ and w17,w27,w26
+ bic w19,w20,w26
+ add w21,w21,w9 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w12,ror#11 // Sigma1(e)
+ ror w12,w22,#2
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ eor w17,w22,w22,ror#9
+ add w21,w21,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w25,w25,w21 // d+=h
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w12,w17,ror#13 // Sigma0(a)
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w21,w21,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w10,w10 // 7
+#endif
+ ldp w11,w12,[x1],#2*4
+ add w21,w21,w17 // h+=Sigma0(a)
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ eor w13,w25,w25,ror#14
+ and w17,w26,w25
+ bic w28,w27,w25
+ add w20,w20,w10 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w13,ror#11 // Sigma1(e)
+ ror w13,w21,#2
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ eor w17,w21,w21,ror#9
+ add w20,w20,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w24,w24,w20 // d+=h
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w13,w17,ror#13 // Sigma0(a)
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w20,w20,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w11,w11 // 8
+#endif
+ add w20,w20,w17 // h+=Sigma0(a)
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ eor w14,w24,w24,ror#14
+ and w17,w25,w24
+ bic w19,w26,w24
+ add w27,w27,w11 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w14,ror#11 // Sigma1(e)
+ ror w14,w20,#2
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ eor w17,w20,w20,ror#9
+ add w27,w27,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w23,w23,w27 // d+=h
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w14,w17,ror#13 // Sigma0(a)
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w27,w27,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w12,w12 // 9
+#endif
+ ldp w13,w14,[x1],#2*4
+ add w27,w27,w17 // h+=Sigma0(a)
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ eor w15,w23,w23,ror#14
+ and w17,w24,w23
+ bic w28,w25,w23
+ add w26,w26,w12 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w15,ror#11 // Sigma1(e)
+ ror w15,w27,#2
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ eor w17,w27,w27,ror#9
+ add w26,w26,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w22,w22,w26 // d+=h
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w15,w17,ror#13 // Sigma0(a)
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w26,w26,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w13,w13 // 10
+#endif
+ add w26,w26,w17 // h+=Sigma0(a)
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ eor w0,w22,w22,ror#14
+ and w17,w23,w22
+ bic w19,w24,w22
+ add w25,w25,w13 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w0,ror#11 // Sigma1(e)
+ ror w0,w26,#2
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ eor w17,w26,w26,ror#9
+ add w25,w25,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w21,w21,w25 // d+=h
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w0,w17,ror#13 // Sigma0(a)
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w25,w25,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w14,w14 // 11
+#endif
+ ldp w15,w0,[x1],#2*4
+ add w25,w25,w17 // h+=Sigma0(a)
+ str w6,[sp,#12]
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ eor w6,w21,w21,ror#14
+ and w17,w22,w21
+ bic w28,w23,w21
+ add w24,w24,w14 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w6,ror#11 // Sigma1(e)
+ ror w6,w25,#2
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ eor w17,w25,w25,ror#9
+ add w24,w24,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w20,w20,w24 // d+=h
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w6,w17,ror#13 // Sigma0(a)
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w24,w24,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w15,w15 // 12
+#endif
+ add w24,w24,w17 // h+=Sigma0(a)
+ str w7,[sp,#0]
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ eor w7,w20,w20,ror#14
+ and w17,w21,w20
+ bic w19,w22,w20
+ add w23,w23,w15 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w7,ror#11 // Sigma1(e)
+ ror w7,w24,#2
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ eor w17,w24,w24,ror#9
+ add w23,w23,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w27,w27,w23 // d+=h
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w7,w17,ror#13 // Sigma0(a)
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w23,w23,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w0,w0 // 13
+#endif
+ ldp w1,w2,[x1]
+ add w23,w23,w17 // h+=Sigma0(a)
+ str w8,[sp,#4]
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ eor w8,w27,w27,ror#14
+ and w17,w20,w27
+ bic w28,w21,w27
+ add w22,w22,w0 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w8,ror#11 // Sigma1(e)
+ ror w8,w23,#2
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ eor w17,w23,w23,ror#9
+ add w22,w22,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w26,w26,w22 // d+=h
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w8,w17,ror#13 // Sigma0(a)
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w22,w22,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w1,w1 // 14
+#endif
+ ldr w6,[sp,#12]
+ add w22,w22,w17 // h+=Sigma0(a)
+ str w9,[sp,#8]
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ eor w9,w26,w26,ror#14
+ and w17,w27,w26
+ bic w19,w20,w26
+ add w21,w21,w1 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w9,ror#11 // Sigma1(e)
+ ror w9,w22,#2
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ eor w17,w22,w22,ror#9
+ add w21,w21,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w25,w25,w21 // d+=h
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w9,w17,ror#13 // Sigma0(a)
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w21,w21,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w2,w2 // 15
+#endif
+ ldr w7,[sp,#0]
+ add w21,w21,w17 // h+=Sigma0(a)
+ str w10,[sp,#12]
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ ror w9,w4,#7
+ and w17,w26,w25
+ ror w8,w1,#17
+ bic w28,w27,w25
+ ror w10,w21,#2
+ add w20,w20,w2 // h+=X[i]
+ eor w16,w16,w25,ror#11
+ eor w9,w9,w4,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w25,ror#25 // Sigma1(e)
+ eor w10,w10,w21,ror#13
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w8,w8,w1,ror#19
+ eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
+ add w20,w20,w16 // h+=Sigma1(e)
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w10,w21,ror#22 // Sigma0(a)
+ eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
+ add w3,w3,w12
+ add w24,w24,w20 // d+=h
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w3,w3,w9
+ add w20,w20,w17 // h+=Sigma0(a)
+ add w3,w3,w8
+.Loop_16_xx:
+ ldr w8,[sp,#4]
+ str w11,[sp,#0]
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ ror w10,w5,#7
+ and w17,w25,w24
+ ror w9,w2,#17
+ bic w19,w26,w24
+ ror w11,w20,#2
+ add w27,w27,w3 // h+=X[i]
+ eor w16,w16,w24,ror#11
+ eor w10,w10,w5,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w24,ror#25 // Sigma1(e)
+ eor w11,w11,w20,ror#13
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w9,w9,w2,ror#19
+ eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
+ add w27,w27,w16 // h+=Sigma1(e)
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w11,w20,ror#22 // Sigma0(a)
+ eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
+ add w4,w4,w13
+ add w23,w23,w27 // d+=h
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w4,w4,w10
+ add w27,w27,w17 // h+=Sigma0(a)
+ add w4,w4,w9
+ ldr w9,[sp,#8]
+ str w12,[sp,#4]
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ ror w11,w6,#7
+ and w17,w24,w23
+ ror w10,w3,#17
+ bic w28,w25,w23
+ ror w12,w27,#2
+ add w26,w26,w4 // h+=X[i]
+ eor w16,w16,w23,ror#11
+ eor w11,w11,w6,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w23,ror#25 // Sigma1(e)
+ eor w12,w12,w27,ror#13
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w10,w10,w3,ror#19
+ eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
+ add w26,w26,w16 // h+=Sigma1(e)
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w12,w27,ror#22 // Sigma0(a)
+ eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
+ add w5,w5,w14
+ add w22,w22,w26 // d+=h
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w5,w5,w11
+ add w26,w26,w17 // h+=Sigma0(a)
+ add w5,w5,w10
+ ldr w10,[sp,#12]
+ str w13,[sp,#8]
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ ror w12,w7,#7
+ and w17,w23,w22
+ ror w11,w4,#17
+ bic w19,w24,w22
+ ror w13,w26,#2
+ add w25,w25,w5 // h+=X[i]
+ eor w16,w16,w22,ror#11
+ eor w12,w12,w7,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w22,ror#25 // Sigma1(e)
+ eor w13,w13,w26,ror#13
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w11,w11,w4,ror#19
+ eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
+ add w25,w25,w16 // h+=Sigma1(e)
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w13,w26,ror#22 // Sigma0(a)
+ eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
+ add w6,w6,w15
+ add w21,w21,w25 // d+=h
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w6,w6,w12
+ add w25,w25,w17 // h+=Sigma0(a)
+ add w6,w6,w11
+ ldr w11,[sp,#0]
+ str w14,[sp,#12]
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ ror w13,w8,#7
+ and w17,w22,w21
+ ror w12,w5,#17
+ bic w28,w23,w21
+ ror w14,w25,#2
+ add w24,w24,w6 // h+=X[i]
+ eor w16,w16,w21,ror#11
+ eor w13,w13,w8,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w21,ror#25 // Sigma1(e)
+ eor w14,w14,w25,ror#13
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w12,w12,w5,ror#19
+ eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
+ add w24,w24,w16 // h+=Sigma1(e)
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w14,w25,ror#22 // Sigma0(a)
+ eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
+ add w7,w7,w0
+ add w20,w20,w24 // d+=h
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w7,w7,w13
+ add w24,w24,w17 // h+=Sigma0(a)
+ add w7,w7,w12
+ ldr w12,[sp,#4]
+ str w15,[sp,#0]
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ ror w14,w9,#7
+ and w17,w21,w20
+ ror w13,w6,#17
+ bic w19,w22,w20
+ ror w15,w24,#2
+ add w23,w23,w7 // h+=X[i]
+ eor w16,w16,w20,ror#11
+ eor w14,w14,w9,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w20,ror#25 // Sigma1(e)
+ eor w15,w15,w24,ror#13
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w13,w13,w6,ror#19
+ eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
+ add w23,w23,w16 // h+=Sigma1(e)
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w15,w24,ror#22 // Sigma0(a)
+ eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
+ add w8,w8,w1
+ add w27,w27,w23 // d+=h
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w8,w8,w14
+ add w23,w23,w17 // h+=Sigma0(a)
+ add w8,w8,w13
+ ldr w13,[sp,#8]
+ str w0,[sp,#4]
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ ror w15,w10,#7
+ and w17,w20,w27
+ ror w14,w7,#17
+ bic w28,w21,w27
+ ror w0,w23,#2
+ add w22,w22,w8 // h+=X[i]
+ eor w16,w16,w27,ror#11
+ eor w15,w15,w10,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w27,ror#25 // Sigma1(e)
+ eor w0,w0,w23,ror#13
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w14,w14,w7,ror#19
+ eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
+ add w22,w22,w16 // h+=Sigma1(e)
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w0,w23,ror#22 // Sigma0(a)
+ eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
+ add w9,w9,w2
+ add w26,w26,w22 // d+=h
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w9,w9,w15
+ add w22,w22,w17 // h+=Sigma0(a)
+ add w9,w9,w14
+ ldr w14,[sp,#12]
+ str w1,[sp,#8]
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ ror w0,w11,#7
+ and w17,w27,w26
+ ror w15,w8,#17
+ bic w19,w20,w26
+ ror w1,w22,#2
+ add w21,w21,w9 // h+=X[i]
+ eor w16,w16,w26,ror#11
+ eor w0,w0,w11,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w26,ror#25 // Sigma1(e)
+ eor w1,w1,w22,ror#13
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w15,w15,w8,ror#19
+ eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
+ add w21,w21,w16 // h+=Sigma1(e)
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w1,w22,ror#22 // Sigma0(a)
+ eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
+ add w10,w10,w3
+ add w25,w25,w21 // d+=h
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w10,w10,w0
+ add w21,w21,w17 // h+=Sigma0(a)
+ add w10,w10,w15
+ ldr w15,[sp,#0]
+ str w2,[sp,#12]
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ ror w1,w12,#7
+ and w17,w26,w25
+ ror w0,w9,#17
+ bic w28,w27,w25
+ ror w2,w21,#2
+ add w20,w20,w10 // h+=X[i]
+ eor w16,w16,w25,ror#11
+ eor w1,w1,w12,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w25,ror#25 // Sigma1(e)
+ eor w2,w2,w21,ror#13
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w0,w0,w9,ror#19
+ eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
+ add w20,w20,w16 // h+=Sigma1(e)
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w2,w21,ror#22 // Sigma0(a)
+ eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
+ add w11,w11,w4
+ add w24,w24,w20 // d+=h
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w11,w11,w1
+ add w20,w20,w17 // h+=Sigma0(a)
+ add w11,w11,w0
+ ldr w0,[sp,#4]
+ str w3,[sp,#0]
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ ror w2,w13,#7
+ and w17,w25,w24
+ ror w1,w10,#17
+ bic w19,w26,w24
+ ror w3,w20,#2
+ add w27,w27,w11 // h+=X[i]
+ eor w16,w16,w24,ror#11
+ eor w2,w2,w13,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w24,ror#25 // Sigma1(e)
+ eor w3,w3,w20,ror#13
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w1,w1,w10,ror#19
+ eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
+ add w27,w27,w16 // h+=Sigma1(e)
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w3,w20,ror#22 // Sigma0(a)
+ eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
+ add w12,w12,w5
+ add w23,w23,w27 // d+=h
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w12,w12,w2
+ add w27,w27,w17 // h+=Sigma0(a)
+ add w12,w12,w1
+ ldr w1,[sp,#8]
+ str w4,[sp,#4]
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ ror w3,w14,#7
+ and w17,w24,w23
+ ror w2,w11,#17
+ bic w28,w25,w23
+ ror w4,w27,#2
+ add w26,w26,w12 // h+=X[i]
+ eor w16,w16,w23,ror#11
+ eor w3,w3,w14,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w23,ror#25 // Sigma1(e)
+ eor w4,w4,w27,ror#13
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w2,w2,w11,ror#19
+ eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
+ add w26,w26,w16 // h+=Sigma1(e)
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w4,w27,ror#22 // Sigma0(a)
+ eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
+ add w13,w13,w6
+ add w22,w22,w26 // d+=h
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w13,w13,w3
+ add w26,w26,w17 // h+=Sigma0(a)
+ add w13,w13,w2
+ ldr w2,[sp,#12]
+ str w5,[sp,#8]
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ ror w4,w15,#7
+ and w17,w23,w22
+ ror w3,w12,#17
+ bic w19,w24,w22
+ ror w5,w26,#2
+ add w25,w25,w13 // h+=X[i]
+ eor w16,w16,w22,ror#11
+ eor w4,w4,w15,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w22,ror#25 // Sigma1(e)
+ eor w5,w5,w26,ror#13
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w3,w3,w12,ror#19
+ eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
+ add w25,w25,w16 // h+=Sigma1(e)
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w5,w26,ror#22 // Sigma0(a)
+ eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
+ add w14,w14,w7
+ add w21,w21,w25 // d+=h
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w14,w14,w4
+ add w25,w25,w17 // h+=Sigma0(a)
+ add w14,w14,w3
+ ldr w3,[sp,#0]
+ str w6,[sp,#12]
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ ror w5,w0,#7
+ and w17,w22,w21
+ ror w4,w13,#17
+ bic w28,w23,w21
+ ror w6,w25,#2
+ add w24,w24,w14 // h+=X[i]
+ eor w16,w16,w21,ror#11
+ eor w5,w5,w0,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w21,ror#25 // Sigma1(e)
+ eor w6,w6,w25,ror#13
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w4,w4,w13,ror#19
+ eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
+ add w24,w24,w16 // h+=Sigma1(e)
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w6,w25,ror#22 // Sigma0(a)
+ eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
+ add w15,w15,w8
+ add w20,w20,w24 // d+=h
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w15,w15,w5
+ add w24,w24,w17 // h+=Sigma0(a)
+ add w15,w15,w4
+ ldr w4,[sp,#4]
+ str w7,[sp,#0]
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ ror w6,w1,#7
+ and w17,w21,w20
+ ror w5,w14,#17
+ bic w19,w22,w20
+ ror w7,w24,#2
+ add w23,w23,w15 // h+=X[i]
+ eor w16,w16,w20,ror#11
+ eor w6,w6,w1,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w20,ror#25 // Sigma1(e)
+ eor w7,w7,w24,ror#13
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w5,w5,w14,ror#19
+ eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
+ add w23,w23,w16 // h+=Sigma1(e)
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w7,w24,ror#22 // Sigma0(a)
+ eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
+ add w0,w0,w9
+ add w27,w27,w23 // d+=h
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w0,w0,w6
+ add w23,w23,w17 // h+=Sigma0(a)
+ add w0,w0,w5
+ ldr w5,[sp,#8]
+ str w8,[sp,#4]
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ ror w7,w2,#7
+ and w17,w20,w27
+ ror w6,w15,#17
+ bic w28,w21,w27
+ ror w8,w23,#2
+ add w22,w22,w0 // h+=X[i]
+ eor w16,w16,w27,ror#11
+ eor w7,w7,w2,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w27,ror#25 // Sigma1(e)
+ eor w8,w8,w23,ror#13
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w6,w6,w15,ror#19
+ eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
+ add w22,w22,w16 // h+=Sigma1(e)
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w8,w23,ror#22 // Sigma0(a)
+ eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
+ add w1,w1,w10
+ add w26,w26,w22 // d+=h
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w1,w1,w7
+ add w22,w22,w17 // h+=Sigma0(a)
+ add w1,w1,w6
+ ldr w6,[sp,#12]
+ str w9,[sp,#8]
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ ror w8,w3,#7
+ and w17,w27,w26
+ ror w7,w0,#17
+ bic w19,w20,w26
+ ror w9,w22,#2
+ add w21,w21,w1 // h+=X[i]
+ eor w16,w16,w26,ror#11
+ eor w8,w8,w3,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w26,ror#25 // Sigma1(e)
+ eor w9,w9,w22,ror#13
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w7,w7,w0,ror#19
+ eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
+ add w21,w21,w16 // h+=Sigma1(e)
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w9,w22,ror#22 // Sigma0(a)
+ eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
+ add w2,w2,w11
+ add w25,w25,w21 // d+=h
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w2,w2,w8
+ add w21,w21,w17 // h+=Sigma0(a)
+ add w2,w2,w7
+ ldr w7,[sp,#0]
+ str w10,[sp,#12]
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ ror w9,w4,#7
+ and w17,w26,w25
+ ror w8,w1,#17
+ bic w28,w27,w25
+ ror w10,w21,#2
+ add w20,w20,w2 // h+=X[i]
+ eor w16,w16,w25,ror#11
+ eor w9,w9,w4,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w25,ror#25 // Sigma1(e)
+ eor w10,w10,w21,ror#13
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w8,w8,w1,ror#19
+ eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
+ add w20,w20,w16 // h+=Sigma1(e)
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w10,w21,ror#22 // Sigma0(a)
+ eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
+ add w3,w3,w12
+ add w24,w24,w20 // d+=h
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w3,w3,w9
+ add w20,w20,w17 // h+=Sigma0(a)
+ add w3,w3,w8
+ cbnz w19,.Loop_16_xx
+
+ ldp x0,x2,[x29,#96]
+ ldr x1,[x29,#112]
+ sub x30,x30,#260 // rewind
+
+ ldp w3,w4,[x0]
+ ldp w5,w6,[x0,#2*4]
+ add x1,x1,#14*4 // advance input pointer
+ ldp w7,w8,[x0,#4*4]
+ add w20,w20,w3
+ ldp w9,w10,[x0,#6*4]
+ add w21,w21,w4
+ add w22,w22,w5
+ add w23,w23,w6
+ stp w20,w21,[x0]
+ add w24,w24,w7
+ add w25,w25,w8
+ stp w22,w23,[x0,#2*4]
+ add w26,w26,w9
+ add w27,w27,w10
+ cmp x1,x2
+ stp w24,w25,[x0,#4*4]
+ stp w26,w27,[x0,#6*4]
+ b.ne .Loop
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#4*4
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#128
+ ret
+.size sha256_block_data_order,.-sha256_block_data_order
+
+.align 6
+.type .LK256,%object
+.LK256:
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+ .long 0 //terminator
+.size .LK256,.-.LK256
+#ifndef __KERNEL__
+.align 3
+.LOPENSSL_armcap_P:
+# ifdef __ILP32__
+ .long OPENSSL_armcap_P-.
+# else
+ .quad OPENSSL_armcap_P-.
+# endif
+#endif
+.asciz "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+#ifndef __KERNEL__
+.type sha256_block_armv8,%function
+.align 6
+sha256_block_armv8:
+.Lv8_entry:
+ stp x29,x30,[sp,#-16]!
+ add x29,sp,#0
+
+ ld1 {v0.4s,v1.4s},[x0]
+ adr x3,.LK256
+
+.Loop_hw:
+ ld1 {v4.16b-v7.16b},[x1],#64
+ sub x2,x2,#1
+ ld1 {v16.4s},[x3],#16
+ rev32 v4.16b,v4.16b
+ rev32 v5.16b,v5.16b
+ rev32 v6.16b,v6.16b
+ rev32 v7.16b,v7.16b
+ orr v18.16b,v0.16b,v0.16b // offload
+ orr v19.16b,v1.16b,v1.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v6.4s
+ .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v7.4s
+ .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v6.4s
+ .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v7.4s
+ .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v6.4s
+ .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v7.4s
+ .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+
+ ld1 {v17.4s},[x3]
+ add v16.4s,v16.4s,v6.4s
+ sub x3,x3,#64*4-16 // rewind
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+
+ add v17.4s,v17.4s,v7.4s
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+
+ add v0.4s,v0.4s,v18.4s
+ add v1.4s,v1.4s,v19.4s
+
+ cbnz x2,.Loop_hw
+
+ st1 {v0.4s,v1.4s},[x0]
+
+ ldr x29,[sp],#16
+ ret
+.size sha256_block_armv8,.-sha256_block_armv8
+#endif
+#ifdef __KERNEL__
+.globl sha256_block_neon
+#endif
+.type sha256_block_neon,%function
+.align 4
+sha256_block_neon:
+.Lneon_entry:
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ sub sp,sp,#16*4
+
+ adr x16,.LK256
+ add x2,x1,x2,lsl#6 // len to point at the end of inp
+
+ ld1 {v0.16b},[x1], #16
+ ld1 {v1.16b},[x1], #16
+ ld1 {v2.16b},[x1], #16
+ ld1 {v3.16b},[x1], #16
+ ld1 {v4.4s},[x16], #16
+ ld1 {v5.4s},[x16], #16
+ ld1 {v6.4s},[x16], #16
+ ld1 {v7.4s},[x16], #16
+ rev32 v0.16b,v0.16b // yes, even on
+ rev32 v1.16b,v1.16b // big-endian
+ rev32 v2.16b,v2.16b
+ rev32 v3.16b,v3.16b
+ mov x17,sp
+ add v4.4s,v4.4s,v0.4s
+ add v5.4s,v5.4s,v1.4s
+ add v6.4s,v6.4s,v2.4s
+ st1 {v4.4s-v5.4s},[x17], #32
+ add v7.4s,v7.4s,v3.4s
+ st1 {v6.4s-v7.4s},[x17]
+ sub x17,x17,#32
+
+ ldp w3,w4,[x0]
+ ldp w5,w6,[x0,#8]
+ ldp w7,w8,[x0,#16]
+ ldp w9,w10,[x0,#24]
+ ldr w12,[sp,#0]
+ mov w13,wzr
+ eor w14,w4,w5
+ mov w15,wzr
+ b .L_00_48
+
+.align 4
+.L_00_48:
+ ext v4.16b,v0.16b,v1.16b,#4
+ add w10,w10,w12
+ add w3,w3,w15
+ and w12,w8,w7
+ bic w15,w9,w7
+ ext v7.16b,v2.16b,v3.16b,#4
+ eor w11,w7,w7,ror#5
+ add w3,w3,w13
+ mov d19,v3.d[1]
+ orr w12,w12,w15
+ eor w11,w11,w7,ror#19
+ ushr v6.4s,v4.4s,#7
+ eor w15,w3,w3,ror#11
+ ushr v5.4s,v4.4s,#3
+ add w10,w10,w12
+ add v0.4s,v0.4s,v7.4s
+ ror w11,w11,#6
+ sli v6.4s,v4.4s,#25
+ eor w13,w3,w4
+ eor w15,w15,w3,ror#20
+ ushr v7.4s,v4.4s,#18
+ add w10,w10,w11
+ ldr w12,[sp,#4]
+ and w14,w14,w13
+ eor v5.16b,v5.16b,v6.16b
+ ror w15,w15,#2
+ add w6,w6,w10
+ sli v7.4s,v4.4s,#14
+ eor w14,w14,w4
+ ushr v16.4s,v19.4s,#17
+ add w9,w9,w12
+ add w10,w10,w15
+ and w12,w7,w6
+ eor v5.16b,v5.16b,v7.16b
+ bic w15,w8,w6
+ eor w11,w6,w6,ror#5
+ sli v16.4s,v19.4s,#15
+ add w10,w10,w14
+ orr w12,w12,w15
+ ushr v17.4s,v19.4s,#10
+ eor w11,w11,w6,ror#19
+ eor w15,w10,w10,ror#11
+ ushr v7.4s,v19.4s,#19
+ add w9,w9,w12
+ ror w11,w11,#6
+ add v0.4s,v0.4s,v5.4s
+ eor w14,w10,w3
+ eor w15,w15,w10,ror#20
+ sli v7.4s,v19.4s,#13
+ add w9,w9,w11
+ ldr w12,[sp,#8]
+ and w13,w13,w14
+ eor v17.16b,v17.16b,v16.16b
+ ror w15,w15,#2
+ add w5,w5,w9
+ eor w13,w13,w3
+ eor v17.16b,v17.16b,v7.16b
+ add w8,w8,w12
+ add w9,w9,w15
+ and w12,w6,w5
+ add v0.4s,v0.4s,v17.4s
+ bic w15,w7,w5
+ eor w11,w5,w5,ror#5
+ add w9,w9,w13
+ ushr v18.4s,v0.4s,#17
+ orr w12,w12,w15
+ ushr v19.4s,v0.4s,#10
+ eor w11,w11,w5,ror#19
+ eor w15,w9,w9,ror#11
+ sli v18.4s,v0.4s,#15
+ add w8,w8,w12
+ ushr v17.4s,v0.4s,#19
+ ror w11,w11,#6
+ eor w13,w9,w10
+ eor v19.16b,v19.16b,v18.16b
+ eor w15,w15,w9,ror#20
+ add w8,w8,w11
+ sli v17.4s,v0.4s,#13
+ ldr w12,[sp,#12]
+ and w14,w14,w13
+ ror w15,w15,#2
+ ld1 {v4.4s},[x16], #16
+ add w4,w4,w8
+ eor v19.16b,v19.16b,v17.16b
+ eor w14,w14,w10
+ eor v17.16b,v17.16b,v17.16b
+ add w7,w7,w12
+ add w8,w8,w15
+ and w12,w5,w4
+ mov v17.d[1],v19.d[0]
+ bic w15,w6,w4
+ eor w11,w4,w4,ror#5
+ add w8,w8,w14
+ add v0.4s,v0.4s,v17.4s
+ orr w12,w12,w15
+ eor w11,w11,w4,ror#19
+ eor w15,w8,w8,ror#11
+ add v4.4s,v4.4s,v0.4s
+ add w7,w7,w12
+ ror w11,w11,#6
+ eor w14,w8,w9
+ eor w15,w15,w8,ror#20
+ add w7,w7,w11
+ ldr w12,[sp,#16]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w3,w3,w7
+ eor w13,w13,w9
+ st1 {v4.4s},[x17], #16
+ ext v4.16b,v1.16b,v2.16b,#4
+ add w6,w6,w12
+ add w7,w7,w15
+ and w12,w4,w3
+ bic w15,w5,w3
+ ext v7.16b,v3.16b,v0.16b,#4
+ eor w11,w3,w3,ror#5
+ add w7,w7,w13
+ mov d19,v0.d[1]
+ orr w12,w12,w15
+ eor w11,w11,w3,ror#19
+ ushr v6.4s,v4.4s,#7
+ eor w15,w7,w7,ror#11
+ ushr v5.4s,v4.4s,#3
+ add w6,w6,w12
+ add v1.4s,v1.4s,v7.4s
+ ror w11,w11,#6
+ sli v6.4s,v4.4s,#25
+ eor w13,w7,w8
+ eor w15,w15,w7,ror#20
+ ushr v7.4s,v4.4s,#18
+ add w6,w6,w11
+ ldr w12,[sp,#20]
+ and w14,w14,w13
+ eor v5.16b,v5.16b,v6.16b
+ ror w15,w15,#2
+ add w10,w10,w6
+ sli v7.4s,v4.4s,#14
+ eor w14,w14,w8
+ ushr v16.4s,v19.4s,#17
+ add w5,w5,w12
+ add w6,w6,w15
+ and w12,w3,w10
+ eor v5.16b,v5.16b,v7.16b
+ bic w15,w4,w10
+ eor w11,w10,w10,ror#5
+ sli v16.4s,v19.4s,#15
+ add w6,w6,w14
+ orr w12,w12,w15
+ ushr v17.4s,v19.4s,#10
+ eor w11,w11,w10,ror#19
+ eor w15,w6,w6,ror#11
+ ushr v7.4s,v19.4s,#19
+ add w5,w5,w12
+ ror w11,w11,#6
+ add v1.4s,v1.4s,v5.4s
+ eor w14,w6,w7
+ eor w15,w15,w6,ror#20
+ sli v7.4s,v19.4s,#13
+ add w5,w5,w11
+ ldr w12,[sp,#24]
+ and w13,w13,w14
+ eor v17.16b,v17.16b,v16.16b
+ ror w15,w15,#2
+ add w9,w9,w5
+ eor w13,w13,w7
+ eor v17.16b,v17.16b,v7.16b
+ add w4,w4,w12
+ add w5,w5,w15
+ and w12,w10,w9
+ add v1.4s,v1.4s,v17.4s
+ bic w15,w3,w9
+ eor w11,w9,w9,ror#5
+ add w5,w5,w13
+ ushr v18.4s,v1.4s,#17
+ orr w12,w12,w15
+ ushr v19.4s,v1.4s,#10
+ eor w11,w11,w9,ror#19
+ eor w15,w5,w5,ror#11
+ sli v18.4s,v1.4s,#15
+ add w4,w4,w12
+ ushr v17.4s,v1.4s,#19
+ ror w11,w11,#6
+ eor w13,w5,w6
+ eor v19.16b,v19.16b,v18.16b
+ eor w15,w15,w5,ror#20
+ add w4,w4,w11
+ sli v17.4s,v1.4s,#13
+ ldr w12,[sp,#28]
+ and w14,w14,w13
+ ror w15,w15,#2
+ ld1 {v4.4s},[x16], #16
+ add w8,w8,w4
+ eor v19.16b,v19.16b,v17.16b
+ eor w14,w14,w6
+ eor v17.16b,v17.16b,v17.16b
+ add w3,w3,w12
+ add w4,w4,w15
+ and w12,w9,w8
+ mov v17.d[1],v19.d[0]
+ bic w15,w10,w8
+ eor w11,w8,w8,ror#5
+ add w4,w4,w14
+ add v1.4s,v1.4s,v17.4s
+ orr w12,w12,w15
+ eor w11,w11,w8,ror#19
+ eor w15,w4,w4,ror#11
+ add v4.4s,v4.4s,v1.4s
+ add w3,w3,w12
+ ror w11,w11,#6
+ eor w14,w4,w5
+ eor w15,w15,w4,ror#20
+ add w3,w3,w11
+ ldr w12,[sp,#32]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w7,w7,w3
+ eor w13,w13,w5
+ st1 {v4.4s},[x17], #16
+ ext v4.16b,v2.16b,v3.16b,#4
+ add w10,w10,w12
+ add w3,w3,w15
+ and w12,w8,w7
+ bic w15,w9,w7
+ ext v7.16b,v0.16b,v1.16b,#4
+ eor w11,w7,w7,ror#5
+ add w3,w3,w13
+ mov d19,v1.d[1]
+ orr w12,w12,w15
+ eor w11,w11,w7,ror#19
+ ushr v6.4s,v4.4s,#7
+ eor w15,w3,w3,ror#11
+ ushr v5.4s,v4.4s,#3
+ add w10,w10,w12
+ add v2.4s,v2.4s,v7.4s
+ ror w11,w11,#6
+ sli v6.4s,v4.4s,#25
+ eor w13,w3,w4
+ eor w15,w15,w3,ror#20
+ ushr v7.4s,v4.4s,#18
+ add w10,w10,w11
+ ldr w12,[sp,#36]
+ and w14,w14,w13
+ eor v5.16b,v5.16b,v6.16b
+ ror w15,w15,#2
+ add w6,w6,w10
+ sli v7.4s,v4.4s,#14
+ eor w14,w14,w4
+ ushr v16.4s,v19.4s,#17
+ add w9,w9,w12
+ add w10,w10,w15
+ and w12,w7,w6
+ eor v5.16b,v5.16b,v7.16b
+ bic w15,w8,w6
+ eor w11,w6,w6,ror#5
+ sli v16.4s,v19.4s,#15
+ add w10,w10,w14
+ orr w12,w12,w15
+ ushr v17.4s,v19.4s,#10
+ eor w11,w11,w6,ror#19
+ eor w15,w10,w10,ror#11
+ ushr v7.4s,v19.4s,#19
+ add w9,w9,w12
+ ror w11,w11,#6
+ add v2.4s,v2.4s,v5.4s
+ eor w14,w10,w3
+ eor w15,w15,w10,ror#20
+ sli v7.4s,v19.4s,#13
+ add w9,w9,w11
+ ldr w12,[sp,#40]
+ and w13,w13,w14
+ eor v17.16b,v17.16b,v16.16b
+ ror w15,w15,#2
+ add w5,w5,w9
+ eor w13,w13,w3
+ eor v17.16b,v17.16b,v7.16b
+ add w8,w8,w12
+ add w9,w9,w15
+ and w12,w6,w5
+ add v2.4s,v2.4s,v17.4s
+ bic w15,w7,w5
+ eor w11,w5,w5,ror#5
+ add w9,w9,w13
+ ushr v18.4s,v2.4s,#17
+ orr w12,w12,w15
+ ushr v19.4s,v2.4s,#10
+ eor w11,w11,w5,ror#19
+ eor w15,w9,w9,ror#11
+ sli v18.4s,v2.4s,#15
+ add w8,w8,w12
+ ushr v17.4s,v2.4s,#19
+ ror w11,w11,#6
+ eor w13,w9,w10
+ eor v19.16b,v19.16b,v18.16b
+ eor w15,w15,w9,ror#20
+ add w8,w8,w11
+ sli v17.4s,v2.4s,#13
+ ldr w12,[sp,#44]
+ and w14,w14,w13
+ ror w15,w15,#2
+ ld1 {v4.4s},[x16], #16
+ add w4,w4,w8
+ eor v19.16b,v19.16b,v17.16b
+ eor w14,w14,w10
+ eor v17.16b,v17.16b,v17.16b
+ add w7,w7,w12
+ add w8,w8,w15
+ and w12,w5,w4
+ mov v17.d[1],v19.d[0]
+ bic w15,w6,w4
+ eor w11,w4,w4,ror#5
+ add w8,w8,w14
+ add v2.4s,v2.4s,v17.4s
+ orr w12,w12,w15
+ eor w11,w11,w4,ror#19
+ eor w15,w8,w8,ror#11
+ add v4.4s,v4.4s,v2.4s
+ add w7,w7,w12
+ ror w11,w11,#6
+ eor w14,w8,w9
+ eor w15,w15,w8,ror#20
+ add w7,w7,w11
+ ldr w12,[sp,#48]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w3,w3,w7
+ eor w13,w13,w9
+ st1 {v4.4s},[x17], #16
+ ext v4.16b,v3.16b,v0.16b,#4
+ add w6,w6,w12
+ add w7,w7,w15
+ and w12,w4,w3
+ bic w15,w5,w3
+ ext v7.16b,v1.16b,v2.16b,#4
+ eor w11,w3,w3,ror#5
+ add w7,w7,w13
+ mov d19,v2.d[1]
+ orr w12,w12,w15
+ eor w11,w11,w3,ror#19
+ ushr v6.4s,v4.4s,#7
+ eor w15,w7,w7,ror#11
+ ushr v5.4s,v4.4s,#3
+ add w6,w6,w12
+ add v3.4s,v3.4s,v7.4s
+ ror w11,w11,#6
+ sli v6.4s,v4.4s,#25
+ eor w13,w7,w8
+ eor w15,w15,w7,ror#20
+ ushr v7.4s,v4.4s,#18
+ add w6,w6,w11
+ ldr w12,[sp,#52]
+ and w14,w14,w13
+ eor v5.16b,v5.16b,v6.16b
+ ror w15,w15,#2
+ add w10,w10,w6
+ sli v7.4s,v4.4s,#14
+ eor w14,w14,w8
+ ushr v16.4s,v19.4s,#17
+ add w5,w5,w12
+ add w6,w6,w15
+ and w12,w3,w10
+ eor v5.16b,v5.16b,v7.16b
+ bic w15,w4,w10
+ eor w11,w10,w10,ror#5
+ sli v16.4s,v19.4s,#15
+ add w6,w6,w14
+ orr w12,w12,w15
+ ushr v17.4s,v19.4s,#10
+ eor w11,w11,w10,ror#19
+ eor w15,w6,w6,ror#11
+ ushr v7.4s,v19.4s,#19
+ add w5,w5,w12
+ ror w11,w11,#6
+ add v3.4s,v3.4s,v5.4s
+ eor w14,w6,w7
+ eor w15,w15,w6,ror#20
+ sli v7.4s,v19.4s,#13
+ add w5,w5,w11
+ ldr w12,[sp,#56]
+ and w13,w13,w14
+ eor v17.16b,v17.16b,v16.16b
+ ror w15,w15,#2
+ add w9,w9,w5
+ eor w13,w13,w7
+ eor v17.16b,v17.16b,v7.16b
+ add w4,w4,w12
+ add w5,w5,w15
+ and w12,w10,w9
+ add v3.4s,v3.4s,v17.4s
+ bic w15,w3,w9
+ eor w11,w9,w9,ror#5
+ add w5,w5,w13
+ ushr v18.4s,v3.4s,#17
+ orr w12,w12,w15
+ ushr v19.4s,v3.4s,#10
+ eor w11,w11,w9,ror#19
+ eor w15,w5,w5,ror#11
+ sli v18.4s,v3.4s,#15
+ add w4,w4,w12
+ ushr v17.4s,v3.4s,#19
+ ror w11,w11,#6
+ eor w13,w5,w6
+ eor v19.16b,v19.16b,v18.16b
+ eor w15,w15,w5,ror#20
+ add w4,w4,w11
+ sli v17.4s,v3.4s,#13
+ ldr w12,[sp,#60]
+ and w14,w14,w13
+ ror w15,w15,#2
+ ld1 {v4.4s},[x16], #16
+ add w8,w8,w4
+ eor v19.16b,v19.16b,v17.16b
+ eor w14,w14,w6
+ eor v17.16b,v17.16b,v17.16b
+ add w3,w3,w12
+ add w4,w4,w15
+ and w12,w9,w8
+ mov v17.d[1],v19.d[0]
+ bic w15,w10,w8
+ eor w11,w8,w8,ror#5
+ add w4,w4,w14
+ add v3.4s,v3.4s,v17.4s
+ orr w12,w12,w15
+ eor w11,w11,w8,ror#19
+ eor w15,w4,w4,ror#11
+ add v4.4s,v4.4s,v3.4s
+ add w3,w3,w12
+ ror w11,w11,#6
+ eor w14,w4,w5
+ eor w15,w15,w4,ror#20
+ add w3,w3,w11
+ ldr w12,[x16]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w7,w7,w3
+ eor w13,w13,w5
+ st1 {v4.4s},[x17], #16
+ cmp w12,#0 // check for K256 terminator
+ ldr w12,[sp,#0]
+ sub x17,x17,#64
+ bne .L_00_48
+
+ sub x16,x16,#256 // rewind x16
+ cmp x1,x2
+ mov x17, #64
+ csel x17, x17, xzr, eq
+ sub x1,x1,x17 // avoid SEGV
+ mov x17,sp
+ add w10,w10,w12
+ add w3,w3,w15
+ and w12,w8,w7
+ ld1 {v0.16b},[x1],#16
+ bic w15,w9,w7
+ eor w11,w7,w7,ror#5
+ ld1 {v4.4s},[x16],#16
+ add w3,w3,w13
+ orr w12,w12,w15
+ eor w11,w11,w7,ror#19
+ eor w15,w3,w3,ror#11
+ rev32 v0.16b,v0.16b
+ add w10,w10,w12
+ ror w11,w11,#6
+ eor w13,w3,w4
+ eor w15,w15,w3,ror#20
+ add v4.4s,v4.4s,v0.4s
+ add w10,w10,w11
+ ldr w12,[sp,#4]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w6,w6,w10
+ eor w14,w14,w4
+ add w9,w9,w12
+ add w10,w10,w15
+ and w12,w7,w6
+ bic w15,w8,w6
+ eor w11,w6,w6,ror#5
+ add w10,w10,w14
+ orr w12,w12,w15
+ eor w11,w11,w6,ror#19
+ eor w15,w10,w10,ror#11
+ add w9,w9,w12
+ ror w11,w11,#6
+ eor w14,w10,w3
+ eor w15,w15,w10,ror#20
+ add w9,w9,w11
+ ldr w12,[sp,#8]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w5,w5,w9
+ eor w13,w13,w3
+ add w8,w8,w12
+ add w9,w9,w15
+ and w12,w6,w5
+ bic w15,w7,w5
+ eor w11,w5,w5,ror#5
+ add w9,w9,w13
+ orr w12,w12,w15
+ eor w11,w11,w5,ror#19
+ eor w15,w9,w9,ror#11
+ add w8,w8,w12
+ ror w11,w11,#6
+ eor w13,w9,w10
+ eor w15,w15,w9,ror#20
+ add w8,w8,w11
+ ldr w12,[sp,#12]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w4,w4,w8
+ eor w14,w14,w10
+ add w7,w7,w12
+ add w8,w8,w15
+ and w12,w5,w4
+ bic w15,w6,w4
+ eor w11,w4,w4,ror#5
+ add w8,w8,w14
+ orr w12,w12,w15
+ eor w11,w11,w4,ror#19
+ eor w15,w8,w8,ror#11
+ add w7,w7,w12
+ ror w11,w11,#6
+ eor w14,w8,w9
+ eor w15,w15,w8,ror#20
+ add w7,w7,w11
+ ldr w12,[sp,#16]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w3,w3,w7
+ eor w13,w13,w9
+ st1 {v4.4s},[x17], #16
+ add w6,w6,w12
+ add w7,w7,w15
+ and w12,w4,w3
+ ld1 {v1.16b},[x1],#16
+ bic w15,w5,w3
+ eor w11,w3,w3,ror#5
+ ld1 {v4.4s},[x16],#16
+ add w7,w7,w13
+ orr w12,w12,w15
+ eor w11,w11,w3,ror#19
+ eor w15,w7,w7,ror#11
+ rev32 v1.16b,v1.16b
+ add w6,w6,w12
+ ror w11,w11,#6
+ eor w13,w7,w8
+ eor w15,w15,w7,ror#20
+ add v4.4s,v4.4s,v1.4s
+ add w6,w6,w11
+ ldr w12,[sp,#20]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w10,w10,w6
+ eor w14,w14,w8
+ add w5,w5,w12
+ add w6,w6,w15
+ and w12,w3,w10
+ bic w15,w4,w10
+ eor w11,w10,w10,ror#5
+ add w6,w6,w14
+ orr w12,w12,w15
+ eor w11,w11,w10,ror#19
+ eor w15,w6,w6,ror#11
+ add w5,w5,w12
+ ror w11,w11,#6
+ eor w14,w6,w7
+ eor w15,w15,w6,ror#20
+ add w5,w5,w11
+ ldr w12,[sp,#24]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w9,w9,w5
+ eor w13,w13,w7
+ add w4,w4,w12
+ add w5,w5,w15
+ and w12,w10,w9
+ bic w15,w3,w9
+ eor w11,w9,w9,ror#5
+ add w5,w5,w13
+ orr w12,w12,w15
+ eor w11,w11,w9,ror#19
+ eor w15,w5,w5,ror#11
+ add w4,w4,w12
+ ror w11,w11,#6
+ eor w13,w5,w6
+ eor w15,w15,w5,ror#20
+ add w4,w4,w11
+ ldr w12,[sp,#28]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w8,w8,w4
+ eor w14,w14,w6
+ add w3,w3,w12
+ add w4,w4,w15
+ and w12,w9,w8
+ bic w15,w10,w8
+ eor w11,w8,w8,ror#5
+ add w4,w4,w14
+ orr w12,w12,w15
+ eor w11,w11,w8,ror#19
+ eor w15,w4,w4,ror#11
+ add w3,w3,w12
+ ror w11,w11,#6
+ eor w14,w4,w5
+ eor w15,w15,w4,ror#20
+ add w3,w3,w11
+ ldr w12,[sp,#32]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w7,w7,w3
+ eor w13,w13,w5
+ st1 {v4.4s},[x17], #16
+ add w10,w10,w12
+ add w3,w3,w15
+ and w12,w8,w7
+ ld1 {v2.16b},[x1],#16
+ bic w15,w9,w7
+ eor w11,w7,w7,ror#5
+ ld1 {v4.4s},[x16],#16
+ add w3,w3,w13
+ orr w12,w12,w15
+ eor w11,w11,w7,ror#19
+ eor w15,w3,w3,ror#11
+ rev32 v2.16b,v2.16b
+ add w10,w10,w12
+ ror w11,w11,#6
+ eor w13,w3,w4
+ eor w15,w15,w3,ror#20
+ add v4.4s,v4.4s,v2.4s
+ add w10,w10,w11
+ ldr w12,[sp,#36]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w6,w6,w10
+ eor w14,w14,w4
+ add w9,w9,w12
+ add w10,w10,w15
+ and w12,w7,w6
+ bic w15,w8,w6
+ eor w11,w6,w6,ror#5
+ add w10,w10,w14
+ orr w12,w12,w15
+ eor w11,w11,w6,ror#19
+ eor w15,w10,w10,ror#11
+ add w9,w9,w12
+ ror w11,w11,#6
+ eor w14,w10,w3
+ eor w15,w15,w10,ror#20
+ add w9,w9,w11
+ ldr w12,[sp,#40]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w5,w5,w9
+ eor w13,w13,w3
+ add w8,w8,w12
+ add w9,w9,w15
+ and w12,w6,w5
+ bic w15,w7,w5
+ eor w11,w5,w5,ror#5
+ add w9,w9,w13
+ orr w12,w12,w15
+ eor w11,w11,w5,ror#19
+ eor w15,w9,w9,ror#11
+ add w8,w8,w12
+ ror w11,w11,#6
+ eor w13,w9,w10
+ eor w15,w15,w9,ror#20
+ add w8,w8,w11
+ ldr w12,[sp,#44]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w4,w4,w8
+ eor w14,w14,w10
+ add w7,w7,w12
+ add w8,w8,w15
+ and w12,w5,w4
+ bic w15,w6,w4
+ eor w11,w4,w4,ror#5
+ add w8,w8,w14
+ orr w12,w12,w15
+ eor w11,w11,w4,ror#19
+ eor w15,w8,w8,ror#11
+ add w7,w7,w12
+ ror w11,w11,#6
+ eor w14,w8,w9
+ eor w15,w15,w8,ror#20
+ add w7,w7,w11
+ ldr w12,[sp,#48]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w3,w3,w7
+ eor w13,w13,w9
+ st1 {v4.4s},[x17], #16
+ add w6,w6,w12
+ add w7,w7,w15
+ and w12,w4,w3
+ ld1 {v3.16b},[x1],#16
+ bic w15,w5,w3
+ eor w11,w3,w3,ror#5
+ ld1 {v4.4s},[x16],#16
+ add w7,w7,w13
+ orr w12,w12,w15
+ eor w11,w11,w3,ror#19
+ eor w15,w7,w7,ror#11
+ rev32 v3.16b,v3.16b
+ add w6,w6,w12
+ ror w11,w11,#6
+ eor w13,w7,w8
+ eor w15,w15,w7,ror#20
+ add v4.4s,v4.4s,v3.4s
+ add w6,w6,w11
+ ldr w12,[sp,#52]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w10,w10,w6
+ eor w14,w14,w8
+ add w5,w5,w12
+ add w6,w6,w15
+ and w12,w3,w10
+ bic w15,w4,w10
+ eor w11,w10,w10,ror#5
+ add w6,w6,w14
+ orr w12,w12,w15
+ eor w11,w11,w10,ror#19
+ eor w15,w6,w6,ror#11
+ add w5,w5,w12
+ ror w11,w11,#6
+ eor w14,w6,w7
+ eor w15,w15,w6,ror#20
+ add w5,w5,w11
+ ldr w12,[sp,#56]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w9,w9,w5
+ eor w13,w13,w7
+ add w4,w4,w12
+ add w5,w5,w15
+ and w12,w10,w9
+ bic w15,w3,w9
+ eor w11,w9,w9,ror#5
+ add w5,w5,w13
+ orr w12,w12,w15
+ eor w11,w11,w9,ror#19
+ eor w15,w5,w5,ror#11
+ add w4,w4,w12
+ ror w11,w11,#6
+ eor w13,w5,w6
+ eor w15,w15,w5,ror#20
+ add w4,w4,w11
+ ldr w12,[sp,#60]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w8,w8,w4
+ eor w14,w14,w6
+ add w3,w3,w12
+ add w4,w4,w15
+ and w12,w9,w8
+ bic w15,w10,w8
+ eor w11,w8,w8,ror#5
+ add w4,w4,w14
+ orr w12,w12,w15
+ eor w11,w11,w8,ror#19
+ eor w15,w4,w4,ror#11
+ add w3,w3,w12
+ ror w11,w11,#6
+ eor w14,w4,w5
+ eor w15,w15,w4,ror#20
+ add w3,w3,w11
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w7,w7,w3
+ eor w13,w13,w5
+ st1 {v4.4s},[x17], #16
+ add w3,w3,w15 // h+=Sigma0(a) from the past
+ ldp w11,w12,[x0,#0]
+ add w3,w3,w13 // h+=Maj(a,b,c) from the past
+ ldp w13,w14,[x0,#8]
+ add w3,w3,w11 // accumulate
+ add w4,w4,w12
+ ldp w11,w12,[x0,#16]
+ add w5,w5,w13
+ add w6,w6,w14
+ ldp w13,w14,[x0,#24]
+ add w7,w7,w11
+ add w8,w8,w12
+ ldr w12,[sp,#0]
+ stp w3,w4,[x0,#0]
+ add w9,w9,w13
+ mov w13,wzr
+ stp w5,w6,[x0,#8]
+ add w10,w10,w14
+ stp w7,w8,[x0,#16]
+ eor w14,w4,w5
+ stp w9,w10,[x0,#24]
+ mov w15,wzr
+ mov x17,sp
+ b.ne .L_00_48
+
+ ldr x29,[x29]
+ add sp,sp,#16*4+16
+ ret
+.size sha256_block_neon,.-sha256_block_neon
+#ifndef __KERNEL__
+.comm OPENSSL_armcap_P,4,4
+#endif
diff --git a/arch/arm64/crypto/sha512-core.S b/arch/arm64/crypto/sha512-core.S
new file mode 100644
index 000000000000..bd0f59f06c9d
--- /dev/null
+++ b/arch/arm64/crypto/sha512-core.S
@@ -0,0 +1,1085 @@
+// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License"). You may not use
+// this file except in compliance with the License. You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+
+// ====================================================================
+// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// project. The module is, however, dual licensed under OpenSSL and
+// CRYPTOGAMS licenses depending on where you obtain it. For further
+// details see http://www.openssl.org/~appro/cryptogams/.
+//
+// Permission to use under GPLv2 terms is granted.
+// ====================================================================
+//
+// SHA256/512 for ARMv8.
+//
+// Performance in cycles per processed byte and improvement coefficient
+// over code generated with "default" compiler:
+//
+// SHA256-hw SHA256(*) SHA512
+// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
+// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
+// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
+// Denver 2.01 10.5 (+26%) 6.70 (+8%)
+// X-Gene 20.0 (+100%) 12.8 (+300%(***))
+// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
+//
+// (*) Software SHA256 results are of lesser relevance, presented
+// mostly for informational purposes.
+// (**) The result is a trade-off: it's possible to improve it by
+// 10% (or by 1 cycle per round), but at the cost of 20% loss
+// on Cortex-A53 (or by 4 cycles per round).
+// (***) Super-impressive coefficients over gcc-generated code are
+// indication of some compiler "pathology", most notably code
+// generated with -mgeneral-regs-only is significantly faster
+// and the gap is only 40-90%.
+//
+// October 2016.
+//
+// Originally it was reckoned that it makes no sense to implement NEON
+// version of SHA256 for 64-bit processors. This is because performance
+// improvement on most wide-spread Cortex-A5x processors was observed
+// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
+// observed that 32-bit NEON SHA256 performs significantly better than
+// 64-bit scalar version on *some* of the more recent processors. As a
+// result, a 64-bit NEON version of SHA256 was added to provide the best
+// all-round performance. For example, it executes ~30% faster on X-Gene
+// and Mongoose. [For reference, NEON version of SHA512 is bound to
+// deliver much less improvement, likely *negative* on Cortex-A5x.
+// Which is why NEON support is limited to SHA256.]
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#endif
+
+.text
+
+.extern OPENSSL_armcap_P
+.globl sha512_block_data_order
+.type sha512_block_data_order,%function
+.align 6
+sha512_block_data_order:
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ sub sp,sp,#4*8
+
+ ldp x20,x21,[x0] // load context
+ ldp x22,x23,[x0,#2*8]
+ ldp x24,x25,[x0,#4*8]
+ add x2,x1,x2,lsl#7 // end of input
+ ldp x26,x27,[x0,#6*8]
+ adr x30,.LK512
+ stp x0,x2,[x29,#96]
+
+.Loop:
+ ldp x3,x4,[x1],#2*8
+ ldr x19,[x30],#8 // *K++
+ eor x28,x21,x22 // magic seed
+ str x1,[x29,#112]
+#ifndef __AARCH64EB__
+ rev x3,x3 // 0
+#endif
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ eor x6,x24,x24,ror#23
+ and x17,x25,x24
+ bic x19,x26,x24
+ add x27,x27,x3 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x6,ror#18 // Sigma1(e)
+ ror x6,x20,#28
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ eor x17,x20,x20,ror#5
+ add x27,x27,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x23,x23,x27 // d+=h
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x6,x17,ror#34 // Sigma0(a)
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x27,x27,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x4,x4 // 1
+#endif
+ ldp x5,x6,[x1],#2*8
+ add x27,x27,x17 // h+=Sigma0(a)
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ eor x7,x23,x23,ror#23
+ and x17,x24,x23
+ bic x28,x25,x23
+ add x26,x26,x4 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x7,ror#18 // Sigma1(e)
+ ror x7,x27,#28
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ eor x17,x27,x27,ror#5
+ add x26,x26,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x22,x22,x26 // d+=h
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x7,x17,ror#34 // Sigma0(a)
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x26,x26,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x5,x5 // 2
+#endif
+ add x26,x26,x17 // h+=Sigma0(a)
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ eor x8,x22,x22,ror#23
+ and x17,x23,x22
+ bic x19,x24,x22
+ add x25,x25,x5 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x8,ror#18 // Sigma1(e)
+ ror x8,x26,#28
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ eor x17,x26,x26,ror#5
+ add x25,x25,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x21,x21,x25 // d+=h
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x8,x17,ror#34 // Sigma0(a)
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x25,x25,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x6,x6 // 3
+#endif
+ ldp x7,x8,[x1],#2*8
+ add x25,x25,x17 // h+=Sigma0(a)
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ eor x9,x21,x21,ror#23
+ and x17,x22,x21
+ bic x28,x23,x21
+ add x24,x24,x6 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x9,ror#18 // Sigma1(e)
+ ror x9,x25,#28
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ eor x17,x25,x25,ror#5
+ add x24,x24,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x20,x20,x24 // d+=h
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x9,x17,ror#34 // Sigma0(a)
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x24,x24,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x7,x7 // 4
+#endif
+ add x24,x24,x17 // h+=Sigma0(a)
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ eor x10,x20,x20,ror#23
+ and x17,x21,x20
+ bic x19,x22,x20
+ add x23,x23,x7 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x10,ror#18 // Sigma1(e)
+ ror x10,x24,#28
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ eor x17,x24,x24,ror#5
+ add x23,x23,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x27,x27,x23 // d+=h
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x10,x17,ror#34 // Sigma0(a)
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x23,x23,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x8,x8 // 5
+#endif
+ ldp x9,x10,[x1],#2*8
+ add x23,x23,x17 // h+=Sigma0(a)
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ eor x11,x27,x27,ror#23
+ and x17,x20,x27
+ bic x28,x21,x27
+ add x22,x22,x8 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x11,ror#18 // Sigma1(e)
+ ror x11,x23,#28
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ eor x17,x23,x23,ror#5
+ add x22,x22,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x26,x26,x22 // d+=h
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x11,x17,ror#34 // Sigma0(a)
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x22,x22,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x9,x9 // 6
+#endif
+ add x22,x22,x17 // h+=Sigma0(a)
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ eor x12,x26,x26,ror#23
+ and x17,x27,x26
+ bic x19,x20,x26
+ add x21,x21,x9 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x12,ror#18 // Sigma1(e)
+ ror x12,x22,#28
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ eor x17,x22,x22,ror#5
+ add x21,x21,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x25,x25,x21 // d+=h
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x12,x17,ror#34 // Sigma0(a)
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x21,x21,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x10,x10 // 7
+#endif
+ ldp x11,x12,[x1],#2*8
+ add x21,x21,x17 // h+=Sigma0(a)
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ eor x13,x25,x25,ror#23
+ and x17,x26,x25
+ bic x28,x27,x25
+ add x20,x20,x10 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x13,ror#18 // Sigma1(e)
+ ror x13,x21,#28
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ eor x17,x21,x21,ror#5
+ add x20,x20,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x24,x24,x20 // d+=h
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x13,x17,ror#34 // Sigma0(a)
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x20,x20,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x11,x11 // 8
+#endif
+ add x20,x20,x17 // h+=Sigma0(a)
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ eor x14,x24,x24,ror#23
+ and x17,x25,x24
+ bic x19,x26,x24
+ add x27,x27,x11 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x14,ror#18 // Sigma1(e)
+ ror x14,x20,#28
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ eor x17,x20,x20,ror#5
+ add x27,x27,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x23,x23,x27 // d+=h
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x14,x17,ror#34 // Sigma0(a)
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x27,x27,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x12,x12 // 9
+#endif
+ ldp x13,x14,[x1],#2*8
+ add x27,x27,x17 // h+=Sigma0(a)
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ eor x15,x23,x23,ror#23
+ and x17,x24,x23
+ bic x28,x25,x23
+ add x26,x26,x12 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x15,ror#18 // Sigma1(e)
+ ror x15,x27,#28
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ eor x17,x27,x27,ror#5
+ add x26,x26,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x22,x22,x26 // d+=h
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x15,x17,ror#34 // Sigma0(a)
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x26,x26,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x13,x13 // 10
+#endif
+ add x26,x26,x17 // h+=Sigma0(a)
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ eor x0,x22,x22,ror#23
+ and x17,x23,x22
+ bic x19,x24,x22
+ add x25,x25,x13 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x0,ror#18 // Sigma1(e)
+ ror x0,x26,#28
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ eor x17,x26,x26,ror#5
+ add x25,x25,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x21,x21,x25 // d+=h
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x0,x17,ror#34 // Sigma0(a)
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x25,x25,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x14,x14 // 11
+#endif
+ ldp x15,x0,[x1],#2*8
+ add x25,x25,x17 // h+=Sigma0(a)
+ str x6,[sp,#24]
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ eor x6,x21,x21,ror#23
+ and x17,x22,x21
+ bic x28,x23,x21
+ add x24,x24,x14 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x6,ror#18 // Sigma1(e)
+ ror x6,x25,#28
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ eor x17,x25,x25,ror#5
+ add x24,x24,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x20,x20,x24 // d+=h
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x6,x17,ror#34 // Sigma0(a)
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x24,x24,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x15,x15 // 12
+#endif
+ add x24,x24,x17 // h+=Sigma0(a)
+ str x7,[sp,#0]
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ eor x7,x20,x20,ror#23
+ and x17,x21,x20
+ bic x19,x22,x20
+ add x23,x23,x15 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x7,ror#18 // Sigma1(e)
+ ror x7,x24,#28
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ eor x17,x24,x24,ror#5
+ add x23,x23,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x27,x27,x23 // d+=h
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x7,x17,ror#34 // Sigma0(a)
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x23,x23,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x0,x0 // 13
+#endif
+ ldp x1,x2,[x1]
+ add x23,x23,x17 // h+=Sigma0(a)
+ str x8,[sp,#8]
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ eor x8,x27,x27,ror#23
+ and x17,x20,x27
+ bic x28,x21,x27
+ add x22,x22,x0 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x8,ror#18 // Sigma1(e)
+ ror x8,x23,#28
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ eor x17,x23,x23,ror#5
+ add x22,x22,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x26,x26,x22 // d+=h
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x8,x17,ror#34 // Sigma0(a)
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x22,x22,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x1,x1 // 14
+#endif
+ ldr x6,[sp,#24]
+ add x22,x22,x17 // h+=Sigma0(a)
+ str x9,[sp,#16]
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ eor x9,x26,x26,ror#23
+ and x17,x27,x26
+ bic x19,x20,x26
+ add x21,x21,x1 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x9,ror#18 // Sigma1(e)
+ ror x9,x22,#28
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ eor x17,x22,x22,ror#5
+ add x21,x21,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x25,x25,x21 // d+=h
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x9,x17,ror#34 // Sigma0(a)
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x21,x21,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x2,x2 // 15
+#endif
+ ldr x7,[sp,#0]
+ add x21,x21,x17 // h+=Sigma0(a)
+ str x10,[sp,#24]
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ ror x9,x4,#1
+ and x17,x26,x25
+ ror x8,x1,#19
+ bic x28,x27,x25
+ ror x10,x21,#28
+ add x20,x20,x2 // h+=X[i]
+ eor x16,x16,x25,ror#18
+ eor x9,x9,x4,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x25,ror#41 // Sigma1(e)
+ eor x10,x10,x21,ror#34
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x8,x8,x1,ror#61
+ eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
+ add x20,x20,x16 // h+=Sigma1(e)
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x10,x21,ror#39 // Sigma0(a)
+ eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
+ add x3,x3,x12
+ add x24,x24,x20 // d+=h
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x3,x3,x9
+ add x20,x20,x17 // h+=Sigma0(a)
+ add x3,x3,x8
+.Loop_16_xx:
+ ldr x8,[sp,#8]
+ str x11,[sp,#0]
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ ror x10,x5,#1
+ and x17,x25,x24
+ ror x9,x2,#19
+ bic x19,x26,x24
+ ror x11,x20,#28
+ add x27,x27,x3 // h+=X[i]
+ eor x16,x16,x24,ror#18
+ eor x10,x10,x5,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x24,ror#41 // Sigma1(e)
+ eor x11,x11,x20,ror#34
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x9,x9,x2,ror#61
+ eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
+ add x27,x27,x16 // h+=Sigma1(e)
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x11,x20,ror#39 // Sigma0(a)
+ eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
+ add x4,x4,x13
+ add x23,x23,x27 // d+=h
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x4,x4,x10
+ add x27,x27,x17 // h+=Sigma0(a)
+ add x4,x4,x9
+ ldr x9,[sp,#16]
+ str x12,[sp,#8]
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ ror x11,x6,#1
+ and x17,x24,x23
+ ror x10,x3,#19
+ bic x28,x25,x23
+ ror x12,x27,#28
+ add x26,x26,x4 // h+=X[i]
+ eor x16,x16,x23,ror#18
+ eor x11,x11,x6,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x23,ror#41 // Sigma1(e)
+ eor x12,x12,x27,ror#34
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x10,x10,x3,ror#61
+ eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
+ add x26,x26,x16 // h+=Sigma1(e)
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x12,x27,ror#39 // Sigma0(a)
+ eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
+ add x5,x5,x14
+ add x22,x22,x26 // d+=h
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x5,x5,x11
+ add x26,x26,x17 // h+=Sigma0(a)
+ add x5,x5,x10
+ ldr x10,[sp,#24]
+ str x13,[sp,#16]
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ ror x12,x7,#1
+ and x17,x23,x22
+ ror x11,x4,#19
+ bic x19,x24,x22
+ ror x13,x26,#28
+ add x25,x25,x5 // h+=X[i]
+ eor x16,x16,x22,ror#18
+ eor x12,x12,x7,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x22,ror#41 // Sigma1(e)
+ eor x13,x13,x26,ror#34
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x11,x11,x4,ror#61
+ eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
+ add x25,x25,x16 // h+=Sigma1(e)
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x13,x26,ror#39 // Sigma0(a)
+ eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
+ add x6,x6,x15
+ add x21,x21,x25 // d+=h
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x6,x6,x12
+ add x25,x25,x17 // h+=Sigma0(a)
+ add x6,x6,x11
+ ldr x11,[sp,#0]
+ str x14,[sp,#24]
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ ror x13,x8,#1
+ and x17,x22,x21
+ ror x12,x5,#19
+ bic x28,x23,x21
+ ror x14,x25,#28
+ add x24,x24,x6 // h+=X[i]
+ eor x16,x16,x21,ror#18
+ eor x13,x13,x8,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x21,ror#41 // Sigma1(e)
+ eor x14,x14,x25,ror#34
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x12,x12,x5,ror#61
+ eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
+ add x24,x24,x16 // h+=Sigma1(e)
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x14,x25,ror#39 // Sigma0(a)
+ eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
+ add x7,x7,x0
+ add x20,x20,x24 // d+=h
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x7,x7,x13
+ add x24,x24,x17 // h+=Sigma0(a)
+ add x7,x7,x12
+ ldr x12,[sp,#8]
+ str x15,[sp,#0]
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ ror x14,x9,#1
+ and x17,x21,x20
+ ror x13,x6,#19
+ bic x19,x22,x20
+ ror x15,x24,#28
+ add x23,x23,x7 // h+=X[i]
+ eor x16,x16,x20,ror#18
+ eor x14,x14,x9,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x20,ror#41 // Sigma1(e)
+ eor x15,x15,x24,ror#34
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x13,x13,x6,ror#61
+ eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
+ add x23,x23,x16 // h+=Sigma1(e)
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x15,x24,ror#39 // Sigma0(a)
+ eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
+ add x8,x8,x1
+ add x27,x27,x23 // d+=h
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x8,x8,x14
+ add x23,x23,x17 // h+=Sigma0(a)
+ add x8,x8,x13
+ ldr x13,[sp,#16]
+ str x0,[sp,#8]
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ ror x15,x10,#1
+ and x17,x20,x27
+ ror x14,x7,#19
+ bic x28,x21,x27
+ ror x0,x23,#28
+ add x22,x22,x8 // h+=X[i]
+ eor x16,x16,x27,ror#18
+ eor x15,x15,x10,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x27,ror#41 // Sigma1(e)
+ eor x0,x0,x23,ror#34
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x14,x14,x7,ror#61
+ eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
+ add x22,x22,x16 // h+=Sigma1(e)
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x0,x23,ror#39 // Sigma0(a)
+ eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
+ add x9,x9,x2
+ add x26,x26,x22 // d+=h
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x9,x9,x15
+ add x22,x22,x17 // h+=Sigma0(a)
+ add x9,x9,x14
+ ldr x14,[sp,#24]
+ str x1,[sp,#16]
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ ror x0,x11,#1
+ and x17,x27,x26
+ ror x15,x8,#19
+ bic x19,x20,x26
+ ror x1,x22,#28
+ add x21,x21,x9 // h+=X[i]
+ eor x16,x16,x26,ror#18
+ eor x0,x0,x11,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x26,ror#41 // Sigma1(e)
+ eor x1,x1,x22,ror#34
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x15,x15,x8,ror#61
+ eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
+ add x21,x21,x16 // h+=Sigma1(e)
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x1,x22,ror#39 // Sigma0(a)
+ eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
+ add x10,x10,x3
+ add x25,x25,x21 // d+=h
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x10,x10,x0
+ add x21,x21,x17 // h+=Sigma0(a)
+ add x10,x10,x15
+ ldr x15,[sp,#0]
+ str x2,[sp,#24]
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ ror x1,x12,#1
+ and x17,x26,x25
+ ror x0,x9,#19
+ bic x28,x27,x25
+ ror x2,x21,#28
+ add x20,x20,x10 // h+=X[i]
+ eor x16,x16,x25,ror#18
+ eor x1,x1,x12,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x25,ror#41 // Sigma1(e)
+ eor x2,x2,x21,ror#34
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x0,x0,x9,ror#61
+ eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
+ add x20,x20,x16 // h+=Sigma1(e)
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x2,x21,ror#39 // Sigma0(a)
+ eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
+ add x11,x11,x4
+ add x24,x24,x20 // d+=h
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x11,x11,x1
+ add x20,x20,x17 // h+=Sigma0(a)
+ add x11,x11,x0
+ ldr x0,[sp,#8]
+ str x3,[sp,#0]
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ ror x2,x13,#1
+ and x17,x25,x24
+ ror x1,x10,#19
+ bic x19,x26,x24
+ ror x3,x20,#28
+ add x27,x27,x11 // h+=X[i]
+ eor x16,x16,x24,ror#18
+ eor x2,x2,x13,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x24,ror#41 // Sigma1(e)
+ eor x3,x3,x20,ror#34
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x1,x1,x10,ror#61
+ eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
+ add x27,x27,x16 // h+=Sigma1(e)
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x3,x20,ror#39 // Sigma0(a)
+ eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
+ add x12,x12,x5
+ add x23,x23,x27 // d+=h
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x12,x12,x2
+ add x27,x27,x17 // h+=Sigma0(a)
+ add x12,x12,x1
+ ldr x1,[sp,#16]
+ str x4,[sp,#8]
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ ror x3,x14,#1
+ and x17,x24,x23
+ ror x2,x11,#19
+ bic x28,x25,x23
+ ror x4,x27,#28
+ add x26,x26,x12 // h+=X[i]
+ eor x16,x16,x23,ror#18
+ eor x3,x3,x14,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x23,ror#41 // Sigma1(e)
+ eor x4,x4,x27,ror#34
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x2,x2,x11,ror#61
+ eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
+ add x26,x26,x16 // h+=Sigma1(e)
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x4,x27,ror#39 // Sigma0(a)
+ eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
+ add x13,x13,x6
+ add x22,x22,x26 // d+=h
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x13,x13,x3
+ add x26,x26,x17 // h+=Sigma0(a)
+ add x13,x13,x2
+ ldr x2,[sp,#24]
+ str x5,[sp,#16]
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ ror x4,x15,#1
+ and x17,x23,x22
+ ror x3,x12,#19
+ bic x19,x24,x22
+ ror x5,x26,#28
+ add x25,x25,x13 // h+=X[i]
+ eor x16,x16,x22,ror#18
+ eor x4,x4,x15,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x22,ror#41 // Sigma1(e)
+ eor x5,x5,x26,ror#34
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x3,x3,x12,ror#61
+ eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
+ add x25,x25,x16 // h+=Sigma1(e)
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x5,x26,ror#39 // Sigma0(a)
+ eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
+ add x14,x14,x7
+ add x21,x21,x25 // d+=h
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x14,x14,x4
+ add x25,x25,x17 // h+=Sigma0(a)
+ add x14,x14,x3
+ ldr x3,[sp,#0]
+ str x6,[sp,#24]
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ ror x5,x0,#1
+ and x17,x22,x21
+ ror x4,x13,#19
+ bic x28,x23,x21
+ ror x6,x25,#28
+ add x24,x24,x14 // h+=X[i]
+ eor x16,x16,x21,ror#18
+ eor x5,x5,x0,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x21,ror#41 // Sigma1(e)
+ eor x6,x6,x25,ror#34
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x4,x4,x13,ror#61
+ eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
+ add x24,x24,x16 // h+=Sigma1(e)
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x6,x25,ror#39 // Sigma0(a)
+ eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
+ add x15,x15,x8
+ add x20,x20,x24 // d+=h
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x15,x15,x5
+ add x24,x24,x17 // h+=Sigma0(a)
+ add x15,x15,x4
+ ldr x4,[sp,#8]
+ str x7,[sp,#0]
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ ror x6,x1,#1
+ and x17,x21,x20
+ ror x5,x14,#19
+ bic x19,x22,x20
+ ror x7,x24,#28
+ add x23,x23,x15 // h+=X[i]
+ eor x16,x16,x20,ror#18
+ eor x6,x6,x1,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x20,ror#41 // Sigma1(e)
+ eor x7,x7,x24,ror#34
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x5,x5,x14,ror#61
+ eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
+ add x23,x23,x16 // h+=Sigma1(e)
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x7,x24,ror#39 // Sigma0(a)
+ eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
+ add x0,x0,x9
+ add x27,x27,x23 // d+=h
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x0,x0,x6
+ add x23,x23,x17 // h+=Sigma0(a)
+ add x0,x0,x5
+ ldr x5,[sp,#16]
+ str x8,[sp,#8]
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ ror x7,x2,#1
+ and x17,x20,x27
+ ror x6,x15,#19
+ bic x28,x21,x27
+ ror x8,x23,#28
+ add x22,x22,x0 // h+=X[i]
+ eor x16,x16,x27,ror#18
+ eor x7,x7,x2,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x27,ror#41 // Sigma1(e)
+ eor x8,x8,x23,ror#34
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x6,x6,x15,ror#61
+ eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
+ add x22,x22,x16 // h+=Sigma1(e)
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x8,x23,ror#39 // Sigma0(a)
+ eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
+ add x1,x1,x10
+ add x26,x26,x22 // d+=h
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x1,x1,x7
+ add x22,x22,x17 // h+=Sigma0(a)
+ add x1,x1,x6
+ ldr x6,[sp,#24]
+ str x9,[sp,#16]
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ ror x8,x3,#1
+ and x17,x27,x26
+ ror x7,x0,#19
+ bic x19,x20,x26
+ ror x9,x22,#28
+ add x21,x21,x1 // h+=X[i]
+ eor x16,x16,x26,ror#18
+ eor x8,x8,x3,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x26,ror#41 // Sigma1(e)
+ eor x9,x9,x22,ror#34
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x7,x7,x0,ror#61
+ eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
+ add x21,x21,x16 // h+=Sigma1(e)
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x9,x22,ror#39 // Sigma0(a)
+ eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
+ add x2,x2,x11
+ add x25,x25,x21 // d+=h
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x2,x2,x8
+ add x21,x21,x17 // h+=Sigma0(a)
+ add x2,x2,x7
+ ldr x7,[sp,#0]
+ str x10,[sp,#24]
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ ror x9,x4,#1
+ and x17,x26,x25
+ ror x8,x1,#19
+ bic x28,x27,x25
+ ror x10,x21,#28
+ add x20,x20,x2 // h+=X[i]
+ eor x16,x16,x25,ror#18
+ eor x9,x9,x4,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x25,ror#41 // Sigma1(e)
+ eor x10,x10,x21,ror#34
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x8,x8,x1,ror#61
+ eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
+ add x20,x20,x16 // h+=Sigma1(e)
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x10,x21,ror#39 // Sigma0(a)
+ eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
+ add x3,x3,x12
+ add x24,x24,x20 // d+=h
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x3,x3,x9
+ add x20,x20,x17 // h+=Sigma0(a)
+ add x3,x3,x8
+ cbnz x19,.Loop_16_xx
+
+ ldp x0,x2,[x29,#96]
+ ldr x1,[x29,#112]
+ sub x30,x30,#648 // rewind
+
+ ldp x3,x4,[x0]
+ ldp x5,x6,[x0,#2*8]
+ add x1,x1,#14*8 // advance input pointer
+ ldp x7,x8,[x0,#4*8]
+ add x20,x20,x3
+ ldp x9,x10,[x0,#6*8]
+ add x21,x21,x4
+ add x22,x22,x5
+ add x23,x23,x6
+ stp x20,x21,[x0]
+ add x24,x24,x7
+ add x25,x25,x8
+ stp x22,x23,[x0,#2*8]
+ add x26,x26,x9
+ add x27,x27,x10
+ cmp x1,x2
+ stp x24,x25,[x0,#4*8]
+ stp x26,x27,[x0,#6*8]
+ b.ne .Loop
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#4*8
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#128
+ ret
+.size sha512_block_data_order,.-sha512_block_data_order
+
+.align 6
+.type .LK512,%object
+.LK512:
+ .quad 0x428a2f98d728ae22,0x7137449123ef65cd
+ .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+ .quad 0x3956c25bf348b538,0x59f111f1b605d019
+ .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+ .quad 0xd807aa98a3030242,0x12835b0145706fbe
+ .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+ .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ .quad 0x9bdc06a725c71235,0xc19bf174cf692694
+ .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+ .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+ .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ .quad 0x983e5152ee66dfab,0xa831c66d2db43210
+ .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+ .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ .quad 0x06ca6351e003826f,0x142929670a0e6e70
+ .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+ .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+ .quad 0x81c2c92e47edaee6,0x92722c851482353b
+ .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+ .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+ .quad 0xd192e819d6ef5218,0xd69906245565a910
+ .quad 0xf40e35855771202a,0x106aa07032bbd1b8
+ .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+ .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+ .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+ .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ .quad 0x90befffa23631e28,0xa4506cebde82bde9
+ .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ .quad 0xca273eceea26619c,0xd186b8c721c0c207
+ .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+ .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ .quad 0x113f9804bef90dae,0x1b710b35131c471b
+ .quad 0x28db77f523047d84,0x32caab7b40c72493
+ .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+ .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+ .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+ .quad 0 // terminator
+.size .LK512,.-.LK512
+#ifndef __KERNEL__
+.align 3
+.LOPENSSL_armcap_P:
+# ifdef __ILP32__
+ .long OPENSSL_armcap_P-.
+# else
+ .quad OPENSSL_armcap_P-.
+# endif
+#endif
+.asciz "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+#ifndef __KERNEL__
+.comm OPENSSL_armcap_P,4,4
+#endif
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 3a4301163e04..a2597e3f70ad 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -25,6 +25,7 @@
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
+#include <asm/cputype.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
@@ -96,6 +97,24 @@
.endm
/*
+ * Value prediction barrier
+ */
+ .macro csdb
+ hint #20
+ .endm
+
+/*
+ * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
+ * of bounds.
+ */
+ .macro mask_nospec64, idx, limit, tmp
+ sub \tmp, \idx, \limit
+ bic \tmp, \tmp, \idx
+ and \idx, \idx, \tmp, asr #63
+ csdb
+ .endm
+
+/*
* NOP sequence
*/
.macro nops, num
@@ -453,4 +472,47 @@ alternative_else_nop_endif
#endif
.endm
+ .macro pte_to_phys, phys, pte
+ and \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
+ .endm
+
+/*
+ * Check the MIDR_EL1 of the current CPU for a given model and a range of
+ * variant/revision. See asm/cputype.h for the macros used below.
+ *
+ * model: MIDR_CPU_MODEL of CPU
+ * rv_min: Minimum of MIDR_CPU_VAR_REV()
+ * rv_max: Maximum of MIDR_CPU_VAR_REV()
+ * res: Result register.
+ * tmp1, tmp2, tmp3: Temporary registers
+ *
+ * Corrupts: res, tmp1, tmp2, tmp3
+ * Returns: 0, if the CPU id doesn't match. Non-zero otherwise
+ */
+ .macro cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
+ mrs \res, midr_el1
+ mov_q \tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
+ mov_q \tmp2, MIDR_CPU_MODEL_MASK
+ and \tmp3, \res, \tmp2 // Extract model
+ and \tmp1, \res, \tmp1 // rev & variant
+ mov_q \tmp2, \model
+ cmp \tmp3, \tmp2
+ cset \res, eq
+ cbz \res, .Ldone\@ // Model matches ?
+
+ .if (\rv_min != 0) // Skip min check if rv_min == 0
+ mov_q \tmp3, \rv_min
+ cmp \tmp1, \tmp3
+ cset \res, ge
+ .endif // \rv_min != 0
+ /* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
+ .if ((\rv_min != \rv_max) || \rv_min == 0)
+ mov_q \tmp2, \rv_max
+ cmp \tmp1, \tmp2
+ cset \tmp2, le
+ and \res, \res, \tmp2
+ .endif
+.Ldone\@:
+ .endm
+
#endif /* __ASM_ASSEMBLER_H */
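The mask_nospec64 macro added to assembler.h above clamps a bounded index to
zero when it lies outside the given limit, so a mis-speculated load cannot be
steered by an attacker-controlled index. As a rough illustration only (the
helper name below is invented for this sketch, it assumes both idx and limit
are below 2^63 as in the intended bounded-index use, and the trailing csdb
speculation barrier has no C equivalent), the arithmetic it performs is:

static inline unsigned long mask_nospec64_sketch(unsigned long idx,
						 unsigned long limit)
{
	/* bit 63 of tmp is set only when idx < limit and idx has bit 63 clear */
	unsigned long tmp  = (idx - limit) & ~idx;
	/* ~0UL when 0 <= idx < limit, 0 otherwise */
	unsigned long mask = 0UL - (tmp >> 63);

	return idx & mask;
}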
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0fe7e43b7fbc..0b0755c961ac 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -31,6 +31,8 @@
#define dmb(opt) asm volatile("dmb " #opt : : : "memory")
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
+#define csdb() asm volatile("hint #20" : : : "memory")
+
#define mb() dsb(sy)
#define rmb() dsb(ld)
#define wmb() dsb(st)
@@ -38,6 +40,27 @@
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
+/*
+ * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
+ * and 0 otherwise.
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+ unsigned long sz)
+{
+ unsigned long mask;
+
+ asm volatile(
+ " cmp %1, %2\n"
+ " sbc %0, xzr, xzr\n"
+ : "=r" (mask)
+ : "r" (idx), "Ir" (sz)
+ : "cc");
+
+ csdb();
+ return mask;
+}
+
#define __smp_mb() dmb(ish)
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
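The csdb() barrier and array_index_mask_nospec() added to barrier.h above
produce a mask that is all-ones only when the index is within bounds. A
hedged sketch of how such a mask is typically consumed is shown below; the
wrapper name is invented purely for illustration, the in-tree consumer being
the generic array_index_nospec() helper:

static inline unsigned long bounded_index_sketch(unsigned long idx,
						 unsigned long sz)
{
	unsigned long mask = array_index_mask_nospec(idx, sz);

	/* idx when 0 <= idx < sz, 0 otherwise, even under speculation */
	return idx & mask;
}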
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 87b446535185..ce67bf6a0886 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -34,7 +34,9 @@
#define ARM64_HAS_32BIT_EL0 13
#define ARM64_HYP_OFFSET_LOW 14
#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
+#define ARM64_UNMAP_KERNEL_AT_EL0 16
+#define ARM64_HARDEN_BRANCH_PREDICTOR 17
-#define ARM64_NCAPS 16
+#define ARM64_NCAPS 18
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 26a68ddb11c1..39d1db68748d 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -56,6 +56,9 @@
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
((partnum) << MIDR_PARTNUM_SHIFT))
+#define MIDR_CPU_VAR_REV(var, rev) \
+ (((var) << MIDR_VARIANT_SHIFT) | (rev))
+
#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
MIDR_ARCHITECTURE_MASK)
@@ -74,20 +77,31 @@
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
+#define ARM_CPU_PART_CORTEX_A55 0xD05
#define ARM_CPU_PART_CORTEX_A57 0xD07
+#define ARM_CPU_PART_CORTEX_A72 0xD08
#define ARM_CPU_PART_CORTEX_A53 0xD03
+#define ARM_CPU_PART_CORTEX_A73 0xD09
+#define ARM_CPU_PART_CORTEX_A75 0xD0A
#define APM_CPU_PART_POTENZA 0x000
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
+#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
#define BRCM_CPU_PART_VULCAN 0x516
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index caf86be815ba..d8e58051f32d 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -51,6 +51,12 @@ enum fixed_addresses {
FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ FIX_ENTRY_TRAMP_DATA,
+ FIX_ENTRY_TRAMP_TEXT,
+#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
__end_of_permanent_fixed_addresses,
/*
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 85c4a8981d47..a891bb6bb3d4 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -48,20 +48,10 @@ do { \
} while (0)
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -91,30 +81,24 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
u32 val, tmp;
+ u32 __user *uaddr;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
return -EFAULT;
+ uaddr = __uaccess_mask_ptr(_uaddr);
uaccess_enable();
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
" prfm pstl1strm, %2\n"
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e5050388e062..0a33ea304e63 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -73,6 +73,9 @@ struct kvm_arch {
/* Timer */
struct arch_timer_kvm timer;
+
+ /* Mandated version of PSCI */
+ u32 psci_version;
};
#define KVM_NR_MEM_OBJS 40
@@ -393,4 +396,9 @@ static inline void __cpu_init_stage2(void)
"PARange is %d bits, unsupported configuration!", parange);
}
+static inline bool kvm_arm_harden_branch_predictor(void)
+{
+ return cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+}
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6d22017ebbad..80bf33715ecb 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -313,5 +313,43 @@ static inline unsigned int kvm_get_vmid_bits(void)
return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu.h>
+
+static inline void *kvm_get_hyp_vector(void)
+{
+ struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+ void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+
+ if (data->fn) {
+ vect = __bp_harden_hyp_vecs_start +
+ data->hyp_vectors_slot * SZ_2K;
+
+ if (!cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ vect = lm_alias(vect);
+ }
+
+ return vect;
+}
+
+static inline int kvm_map_vectors(void)
+{
+ return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
+ kvm_ksym_ref(__bp_harden_hyp_vecs_end),
+ PAGE_HYP_EXEC);
+}
+
+#else
+static inline void *kvm_get_hyp_vector(void)
+{
+ return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+ return 0;
+}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h
deleted file mode 100644
index bc39e557c56c..000000000000
--- a/arch/arm64/include/asm/kvm_psci.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM64_KVM_PSCI_H__
-#define __ARM64_KVM_PSCI_H__
-
-#define KVM_ARM_PSCI_0_1 1
-#define KVM_ARM_PSCI_0_2 2
-
-int kvm_psci_version(struct kvm_vcpu *vcpu);
-int kvm_psci_call(struct kvm_vcpu *vcpu);
-
-#endif /* __ARM64_KVM_PSCI_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 53211a0acf0f..ba917be5565a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -60,12 +60,12 @@
* KIMAGE_VADDR - the virtual address of the start of the kernel image
* VA_BITS - the maximum number of bits for virtual addresses.
* VA_START - the first kernel virtual address.
- * TASK_SIZE - the maximum size of a user space task.
- * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
-#define VA_START (UL(0xffffffffffffffff) << VA_BITS)
-#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
+#define VA_START (UL(0xffffffffffffffff) - \
+ (UL(1) << VA_BITS) + 1)
+#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
+ (UL(1) << (VA_BITS - 1)) + 1)
#define KIMAGE_VADDR (MODULES_END)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
@@ -74,19 +74,6 @@
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
-#define TASK_SIZE_64 (UL(1) << VA_BITS)
-
-#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32 UL(0x100000000)
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
- TASK_SIZE_32 : TASK_SIZE_64)
-#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
- TASK_SIZE_32 : TASK_SIZE_64)
-#else
-#define TASK_SIZE TASK_SIZE_64
-#endif /* CONFIG_COMPAT */
-
-#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
#define KERNEL_START _text
#define KERNEL_END _end
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 8d9fce037b2f..d51158a61892 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,6 +16,12 @@
#ifndef __ASM_MMU_H
#define __ASM_MMU_H
+#define USER_ASID_FLAG (UL(1) << 48)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/percpu.h>
+
typedef struct {
atomic64_t id;
void *vdso;
@@ -28,6 +34,49 @@ typedef struct {
*/
#define ASID(mm) ((mm)->context.id.counter & 0xffff)
+static inline bool arm64_kernel_unmapped_at_el0(void)
+{
+ return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+ cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+}
+
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+ int hyp_vectors_slot;
+ bp_hardening_cb_t fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return this_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+ struct bp_hardening_data *d;
+
+ if (!cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+ return;
+
+ d = arm64_get_bp_hardening_data();
+ if (d->fn)
+ d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void) { }
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
@@ -37,4 +86,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
pgprot_t prot, bool allow_block_mappings);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0363fe80455c..041014cee28b 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -51,6 +51,13 @@ static inline void cpu_set_reserved_ttbr0(void)
isb();
}
+static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+ BUG_ON(pgd == swapper_pg_dir);
+ cpu_set_reserved_ttbr0();
+ cpu_do_switch_mm(virt_to_phys(pgd),mm);
+}
+
/*
* TCR.T0SZ value to use when the ID map is active. Usually equals
* TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index eb0c2bd90de9..8df4cb6ac6f7 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -272,6 +272,7 @@
#define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT)
#define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT)
+#define TCR_A1 (UL(1) << 22)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)
#define TCR_HA (UL(1) << 39)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 2142c7726e76..f705d96a76f2 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -34,8 +34,14 @@
#include <asm/pgtable-types.h>
-#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
+#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+
+#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
+#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -47,23 +53,24 @@
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
-#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT
-#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
+#define PAGE_KERNEL __pgprot(PROT_NORMAL)
+#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
+#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
+#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
+#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
-#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
-#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
-#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
+#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
+#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
+#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
-#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7acd3c5c7643..3a30a3994e4a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -692,6 +692,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a swap entry:
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 14ad6e4e87d1..16cef2e8449e 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h>
-#define cpu_switch_mm(pgd,mm) \
-do { \
- BUG_ON(pgd == swapper_pg_dir); \
- cpu_do_switch_mm(virt_to_phys(pgd),mm); \
-} while (0)
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 60e34824e18c..5917147af0c4 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -19,6 +19,13 @@
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H
+#define TASK_SIZE_64 (UL(1) << VA_BITS)
+
+#define KERNEL_DS UL(-1)
+#define USER_DS (TASK_SIZE_64 - 1)
+
+#ifndef __ASSEMBLY__
+
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
@@ -37,6 +44,22 @@
#include <asm/ptrace.h>
#include <asm/types.h>
+/*
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
+ */
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32 UL(0x100000000)
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE TASK_SIZE_64
+#endif /* CONFIG_COMPAT */
+
+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
+
#define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE 0xffff0000
@@ -192,4 +215,5 @@ int cpu_enable_pan(void *__unused);
int cpu_enable_uao(void *__unused);
int cpu_enable_cache_maint_trap(void *__unused);
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 7393cc767edb..88bbe364b6ae 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -117,6 +117,9 @@
#define ID_AA64ISAR0_AES_SHIFT 4
/* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV3_SHIFT 60
+#define ID_AA64PFR0_CSV2_SHIFT 56
+#define ID_AA64PFR0_SVE_SHIFT 32
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index deab52374119..ad6bd8b26ada 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <asm/cputype.h>
+#include <asm/mmu.h>
/*
* Raw TLBI operations.
@@ -42,6 +43,11 @@
#define __tlbi(op, ...) __TLBI_N(op, ##__VA_ARGS__, 1, 0)
+#define __tlbi_user(op, arg) do { \
+ if (arm64_kernel_unmapped_at_el0()) \
+ __tlbi(op, (arg) | USER_ASID_FLAG); \
+} while (0)
+
/*
* TLB Management
* ==============
@@ -103,6 +109,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
dsb(ishst);
__tlbi(aside1is, asid);
+ __tlbi_user(aside1is, asid);
dsb(ish);
}
@@ -113,6 +120,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
dsb(ishst);
__tlbi(vale1is, addr);
+ __tlbi_user(vale1is, addr);
dsb(ish);
}
@@ -139,10 +147,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
- if (last_level)
+ if (last_level) {
__tlbi(vale1is, addr);
- else
+ __tlbi_user(vale1is, addr);
+ } else {
__tlbi(vae1is, addr);
+ __tlbi_user(vae1is, addr);
+ }
}
dsb(ish);
}
@@ -182,6 +193,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
__tlbi(vae1is, addr);
+ __tlbi_user(vae1is, addr);
dsb(ish);
}
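
Note on the tlbflush.h hunks above (illustrative, not part of the diff): with the
kernel unmapped at EL0 each mm runs under a pair of ASIDs that differ only in
bit 48, so every user-facing invalidation is issued twice -- once as-is and once
with USER_ASID_FLAG ORed into the TLBI argument. A condensed model of the
page-level case (flush_one_user_page() is a hypothetical helper mirroring
flush_tlb_page() above, taking the mm directly instead of a vma):

static inline void flush_one_user_page(struct mm_struct *mm, unsigned long uaddr)
{
	/* ASID in bits [63:48], page number in the low bits */
	unsigned long addr = (uaddr >> 12) | (ASID(mm) << 48);

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);	/* no-op unless arm64_kernel_unmapped_at_el0() */
	dsb(ish);
}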
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 31b6940ec2f2..71a8ab47fabc 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -33,6 +33,7 @@
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
+#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
#include <asm/memory.h>
@@ -63,10 +64,7 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
-#define KERNEL_DS (-1UL)
#define get_ds() (KERNEL_DS)
-
-#define USER_DS TASK_SIZE_64
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
@@ -74,6 +72,13 @@ static inline void set_fs(mm_segment_t fs)
current_thread_info()->addr_limit = fs;
/*
+ * Prevent a mispredicted conditional call to set_fs from forwarding
+ * the wrong address limit to access_ok under speculation.
+ */
+ dsb(nsh);
+ isb();
+
+ /*
* Enable/disable UAO so that copy_to_user() etc can access
* kernel memory with the unprivileged instructions.
*/
@@ -91,22 +96,32 @@ static inline void set_fs(mm_segment_t fs)
* Returns 1 if the range is valid, 0 otherwise.
*
* This is equivalent to the following test:
- * (u65)addr + (u65)size <= current->addr_limit
- *
- * This needs 65-bit arithmetic.
+ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
*/
-#define __range_ok(addr, size) \
-({ \
- unsigned long __addr = (unsigned long __force)(addr); \
- unsigned long flag, roksum; \
- __chk_user_ptr(addr); \
- asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
- : "=&r" (flag), "=&r" (roksum) \
- : "1" (__addr), "Ir" (size), \
- "r" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; \
-})
+static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+{
+ unsigned long limit = current_thread_info()->addr_limit;
+
+ __chk_user_ptr(addr);
+ asm volatile(
+ // A + B <= C + 1 for all A,B,C, in four easy steps:
+ // 1: X = A + B; X' = X % 2^64
+ " adds %0, %0, %2\n"
+ // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
+ " csel %1, xzr, %1, hi\n"
+ // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
+ // to compensate for the carry flag being set in step 4. For
+ // X > 2^64, X' merely has to remain nonzero, which it does.
+ " csinv %0, %0, xzr, cc\n"
+ // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
+ // comes from the carry in being clear. Otherwise, we are
+ // testing X' - C == 0, subject to the previous adjustments.
+ " sbcs xzr, %0, %1\n"
+ " cset %0, ls\n"
+ : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+
+ return addr;
+}
/*
* When dealing with data aborts, watchpoints, or instruction traps we may end
@@ -115,7 +130,7 @@ static inline void set_fs(mm_segment_t fs)
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
-#define access_ok(type, addr, size) __range_ok(addr, size)
+#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
#define user_addr_max get_fs
#define _ASM_EXTABLE(from, to) \
@@ -218,6 +233,26 @@ static inline void uaccess_enable_not_uao(void)
}
/*
+ * Sanitise a uaccess pointer such that it becomes NULL if above the
+ * current addr_limit.
+ */
+#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
+static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
+{
+ void __user *safe_ptr;
+
+ asm volatile(
+ " bics xzr, %1, %2\n"
+ " csel %0, %1, xzr, eq\n"
+ : "=&r" (safe_ptr)
+ : "r" (ptr), "r" (current_thread_info()->addr_limit)
+ : "cc");
+
+ csdb();
+ return safe_ptr;
+}
+
+/*
* The "__xxx" versions of the user access functions do not verify the address
* space - it must have been done previously with a separate "access_ok()"
* call.
@@ -269,30 +304,35 @@ do { \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
-#define __get_user(x, ptr) \
+#define __get_user_check(x, ptr, err) \
({ \
- int __gu_err = 0; \
- __get_user_err((x), (ptr), __gu_err); \
- __gu_err; \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
+ __p = uaccess_mask_ptr(__p); \
+ __get_user_err((x), __p, (err)); \
+ } else { \
+ (x) = 0; (err) = -EFAULT; \
+ } \
})
#define __get_user_error(x, ptr, err) \
({ \
- __get_user_err((x), (ptr), (err)); \
+ __get_user_check((x), (ptr), (err)); \
(void)0; \
})
-#define __get_user_unaligned __get_user
-
-#define get_user(x, ptr) \
+#define __get_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __user *__p = (ptr); \
- might_fault(); \
- access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
- __get_user((x), __p) : \
- ((x) = 0, -EFAULT); \
+ int __gu_err = 0; \
+ __get_user_check((x), (ptr), __gu_err); \
+ __gu_err; \
})
+#define __get_user_unaligned __get_user
+
+#define get_user __get_user
+
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
asm volatile( \
"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
@@ -335,47 +375,51 @@ do { \
uaccess_disable_not_uao(); \
} while (0)
-#define __put_user(x, ptr) \
+#define __put_user_check(x, ptr, err) \
({ \
- int __pu_err = 0; \
- __put_user_err((x), (ptr), __pu_err); \
- __pu_err; \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
+ __p = uaccess_mask_ptr(__p); \
+ __put_user_err((x), __p, (err)); \
+ } else { \
+ (err) = -EFAULT; \
+ } \
})
#define __put_user_error(x, ptr, err) \
({ \
- __put_user_err((x), (ptr), (err)); \
+ __put_user_check((x), (ptr), (err)); \
(void)0; \
})
-#define __put_user_unaligned __put_user
-
-#define put_user(x, ptr) \
+#define __put_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __user *__p = (ptr); \
- might_fault(); \
- access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
- __put_user((x), __p) : \
- -EFAULT; \
+ int __pu_err = 0; \
+ __put_user_check((x), (ptr), __pu_err); \
+ __pu_err; \
})
+#define __put_user_unaligned __put_user
+
+#define put_user __put_user
+
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
check_object_size(to, n, false);
- return __arch_copy_from_user(to, from, n);
+ return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
}
static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
check_object_size(from, n, true);
- return __arch_copy_to_user(to, from, n);
+ return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
}
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
@@ -403,22 +447,25 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const voi
return n;
}
-static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
- n = __copy_in_user(to, from, n);
+ n = __arch_copy_in_user(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n);
return n;
}
+#define copy_in_user __copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
+static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
- n = __clear_user(to, n);
+ n = __arch_clear_user(__uaccess_mask_ptr(to), n);
return n;
}
+#define clear_user __clear_user
extern long strncpy_from_user(char *dest, const char __user *src, long count);
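
Note on the uaccess.h hunks above (illustrative, not part of the diff): because
USER_DS is now TASK_SIZE_64 - 1 (an all-ones mask below the user/kernel split),
__uaccess_mask_ptr() can reject a pointer with a pure data dependency instead of
a branch: any address bit set outside addr_limit forces the result to NULL, so a
mispredicted access_ok() cannot steer a speculative access at kernel memory.
A C-level model of the value the bics/csel sequence computes (mask_ptr_model()
is a hypothetical name; the real helper must stay branch-free, hence the inline
asm plus csdb()):

static inline const void __user *mask_ptr_model(const void __user *ptr,
						unsigned long addr_limit)
{
	/* NULL if ptr sets any bit the limit does not cover, else ptr */
	return ((unsigned long)ptr & ~addr_limit) ? NULL : ptr;
}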
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3051f86a9b5f..702de7a2b024 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -195,6 +195,12 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 6a7384eee08d..5c26760225ea 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -52,6 +52,10 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
cpu-reset.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ifeq ($(CONFIG_KVM),y)
+arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
+endif
+
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
head-y := head.o
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index e9c4dc9e0ada..66be504edb6c 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page);
/* user mem (segment) */
EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 14709b7f19df..8e44b06277f3 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -24,6 +24,7 @@
#include <linux/kvm_host.h>
#include <linux/suspend.h>
#include <asm/cpufeature.h>
+#include <asm/fixmap.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
@@ -148,11 +149,14 @@ int main(void)
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
-
BLANK();
DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next));
DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val));
+ BLANK();
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ DEFINE(TRAMP_VALIAS, TRAMP_VALIAS);
+#endif
return 0;
}
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
new file mode 100644
index 000000000000..dc4eb154e33b
--- /dev/null
+++ b/arch/arm64/kernel/bpi.S
@@ -0,0 +1,75 @@
+/*
+ * Contains CPU specific branch predictor invalidation sequences
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
+
+.macro ventry target
+ .rept 31
+ nop
+ .endr
+ b \target
+.endm
+
+.macro vectors target
+ ventry \target + 0x000
+ ventry \target + 0x080
+ ventry \target + 0x100
+ ventry \target + 0x180
+
+ ventry \target + 0x200
+ ventry \target + 0x280
+ ventry \target + 0x300
+ ventry \target + 0x380
+
+ ventry \target + 0x400
+ ventry \target + 0x480
+ ventry \target + 0x500
+ ventry \target + 0x580
+
+ ventry \target + 0x600
+ ventry \target + 0x680
+ ventry \target + 0x700
+ ventry \target + 0x780
+.endm
+
+ .align 11
+ENTRY(__bp_harden_hyp_vecs_start)
+ .rept 4
+ vectors __kvm_hyp_vector
+ .endr
+ENTRY(__bp_harden_hyp_vecs_end)
+
+.macro smccc_workaround_1 inst
+ sub sp, sp, #(8 * 4)
+ stp x2, x3, [sp, #(8 * 0)]
+ stp x0, x1, [sp, #(8 * 2)]
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+ \inst #0
+ ldp x2, x3, [sp, #(8 * 0)]
+ ldp x0, x1, [sp, #(8 * 2)]
+ add sp, sp, #(8 * 4)
+.endm
+
+ENTRY(__smccc_workaround_1_smc_start)
+ smccc_workaround_1 smc
+ENTRY(__smccc_workaround_1_smc_end)
+
+ENTRY(__smccc_workaround_1_hvc_start)
+ smccc_workaround_1 hvc
+ENTRY(__smccc_workaround_1_hvc_end)
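
Sizing note for bpi.S above: each ventry expands to 32 A64 instructions (31 nops
plus a branch), i.e. 0x80 bytes, so one "vectors" expansion covers the 16
architectural vector entries in exactly 16 * 0x80 = 2KiB. The .rept 4 block
therefore reserves four 2KiB hyp-vector slots between __bp_harden_hyp_vecs_start
and __bp_harden_hyp_vecs_end, matching the SZ_2K stride used when the SMCCC
workaround sequences are patched in by cpu_errata.c later in this diff.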
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 65f42d257414..f736a6f81ecd 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -16,7 +16,7 @@
#include <asm/virt.h>
.text
-.pushsection .idmap.text, "ax"
+.pushsection .idmap.text, "awx"
/*
* __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index b75e917aac46..74107134cc30 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -46,6 +46,147 @@ static int cpu_enable_trap_ctr_access(void *__unused)
return 0;
}
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+#ifdef CONFIG_KVM
+extern char __smccc_workaround_1_smc_start[];
+extern char __smccc_workaround_1_smc_end[];
+extern char __smccc_workaround_1_hvc_start[];
+extern char __smccc_workaround_1_hvc_end[];
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ void *dst = __bp_harden_hyp_vecs_start + slot * SZ_2K;
+ int i;
+
+ for (i = 0; i < SZ_2K; i += 0x80)
+ memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+ flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ static int last_slot = -1;
+ static DEFINE_SPINLOCK(bp_lock);
+ int cpu, slot = -1;
+
+ spin_lock(&bp_lock);
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+ break;
+ }
+ }
+
+ if (slot == -1) {
+ last_slot++;
+ BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+ / SZ_2K) <= last_slot);
+ slot = last_slot;
+ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+ }
+
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ spin_unlock(&bp_lock);
+}
+#else
+#define __smccc_workaround_1_smc_start NULL
+#define __smccc_workaround_1_smc_end NULL
+#define __smccc_workaround_1_hvc_start NULL
+#define __smccc_workaround_1_hvc_end NULL
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ __this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif /* CONFIG_KVM */
+
+static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+ bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ u64 pfr0;
+
+ if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+ return;
+
+ pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+ return;
+
+ __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+}
+
+#include <uapi/linux/psci.h>
+#include <linux/arm-smccc.h>
+#include <linux/psci.h>
+
+static void call_smc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void call_hvc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static int enable_smccc_arch_workaround_1(void *data)
+{
+ const struct arm64_cpu_capabilities *entry = data;
+ bp_hardening_cb_t cb;
+ void *smccc_start, *smccc_end;
+ struct arm_smccc_res res;
+
+ if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+ return 0;
+
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+ return 0;
+
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if (res.a0)
+ return 0;
+ cb = call_hvc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_hvc_start;
+ smccc_end = __smccc_workaround_1_hvc_end;
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if (res.a0)
+ return 0;
+ cb = call_smc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_smc_start;
+ smccc_end = __smccc_workaround_1_smc_end;
+ break;
+
+ default:
+ return 0;
+ }
+
+ install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+
+ return 0;
+}
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
#define MIDR_RANGE(model, min, max) \
.def_scope = SCOPE_LOCAL_CPU, \
.matches = is_affected_midr_range, \
@@ -53,6 +194,13 @@ static int cpu_enable_trap_ctr_access(void *__unused)
.midr_range_min = min, \
.midr_range_max = max
+#define MIDR_ALL_VERSIONS(model) \
+ .def_scope = SCOPE_LOCAL_CPU, \
+ .matches = is_affected_midr_range, \
+ .midr_model = model, \
+ .midr_range_min = 0, \
+ .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -130,6 +278,38 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.def_scope = SCOPE_LOCAL_CPU,
.enable = cpu_enable_trap_ctr_access,
},
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ .enable = enable_smccc_arch_workaround_1,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ .enable = enable_smccc_arch_workaround_1,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ .enable = enable_smccc_arch_workaround_1,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+ .enable = enable_smccc_arch_workaround_1,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ .enable = enable_smccc_arch_workaround_1,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+ .enable = enable_smccc_arch_workaround_1,
+ },
+#endif
{
}
};
@@ -143,15 +323,18 @@ void verify_local_cpu_errata_workarounds(void)
{
const struct arm64_cpu_capabilities *caps = arm64_errata;
- for (; caps->matches; caps++)
- if (!cpus_have_cap(caps->capability) &&
- caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ for (; caps->matches; caps++) {
+ if (cpus_have_cap(caps->capability)) {
+ if (caps->enable)
+ caps->enable((void *)caps);
+ } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
pr_crit("CPU%d: Requires work around for %s, not detected"
" at boot time\n",
smp_processor_id(),
caps->desc ? : "an erratum");
cpu_die_early();
}
+ }
}
void update_cpu_errata_workarounds(void)
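
Capacity note for the cpu_errata.c hunk above: __copy_hyp_vect_bpi() stamps the
chosen SMCCC sequence at every 0x80-byte vector entry within a 2KiB slot, and the
BUG_ON in __install_bp_hardening_cb() limits allocation to
(__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start) / SZ_2K = 4 slots, so at
most four distinct hardening callbacks can coexist system-wide; CPUs that share a
callback reuse the slot recorded in their per-CPU bp_hardening_data.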
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index af5a1e34fb07..e60f6de1db09 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -94,7 +94,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
+ ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 24, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
@@ -747,6 +749,86 @@ static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ char const *str = "command line option";
+ u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
+
+ /*
+ * For reasons that aren't entirely clear, enabling KPTI on Cavium
+ * ThunderX leads to apparent I-cache corruption of kernel text, which
+ * ends as well as you might imagine. Don't even try.
+ */
+ if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ str = "ARM64_WORKAROUND_CAVIUM_27456";
+ __kpti_forced = -1;
+ }
+
+ /* Forced? */
+ if (__kpti_forced) {
+ pr_info_once("kernel page table isolation forced %s by %s\n",
+ __kpti_forced > 0 ? "ON" : "OFF", str);
+ return __kpti_forced > 0;
+ }
+
+ /* Useful for KASLR robustness */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return true;
+
+ /* Don't force KPTI for CPUs that are not vulnerable */
+ switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
+ case MIDR_CAVIUM_THUNDERX2:
+ case MIDR_BRCM_VULCAN:
+ return false;
+ }
+
+ /* Defer to CPU feature registers */
+ return !cpuid_feature_extract_unsigned_field(pfr0,
+ ID_AA64PFR0_CSV3_SHIFT);
+}
+
+static int kpti_install_ng_mappings(void *__unused)
+{
+ typedef void (kpti_remap_fn)(int, int, phys_addr_t);
+ extern kpti_remap_fn idmap_kpti_install_ng_mappings;
+ kpti_remap_fn *remap_fn;
+
+ static bool kpti_applied = false;
+ int cpu = smp_processor_id();
+
+ if (kpti_applied)
+ return 0;
+
+ remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+
+ cpu_install_idmap();
+ remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
+ cpu_uninstall_idmap();
+
+ if (!cpu)
+ kpti_applied = true;
+
+ return 0;
+}
+
+static int __init parse_kpti(char *str)
+{
+ bool enabled;
+ int ret = strtobool(str, &enabled);
+
+ if (ret)
+ return ret;
+
+ __kpti_forced = enabled ? 1 : -1;
+ return 0;
+}
+__setup("kpti=", parse_kpti);
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -830,6 +912,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.def_scope = SCOPE_SYSTEM,
.matches = hyp_offset_low,
},
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ {
+ .desc = "Kernel page table isolation (KPTI)",
+ .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = unmap_kernel_at_el0,
+ .enable = kpti_install_ng_mappings,
+ },
+#endif
{},
};
@@ -923,6 +1014,25 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
cap_set_elf_hwcap(hwcaps);
}
+/*
+ * Check if the current CPU has a given feature capability.
+ * Should be called from non-preemptible context.
+ */
+static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
+ unsigned int cap)
+{
+ const struct arm64_cpu_capabilities *caps;
+
+ if (WARN_ON(preemptible()))
+ return false;
+
+ for (caps = cap_array; caps->matches; caps++)
+ if (caps->capability == cap &&
+ caps->matches(caps, SCOPE_LOCAL_CPU))
+ return true;
+ return false;
+}
+
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info)
{
@@ -950,7 +1060,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
* uses an IPI, giving us a PSTATE that disappears when
* we return.
*/
- stop_machine(caps->enable, NULL, cpu_online_mask);
+ stop_machine(caps->enable, (void *)caps, cpu_online_mask);
}
/*
@@ -991,8 +1101,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
}
static void
-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
+verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
{
+ const struct arm64_cpu_capabilities *caps = caps_list;
for (; caps->matches; caps++) {
if (!cpus_have_cap(caps->capability))
continue;
@@ -1000,13 +1111,13 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
* If the new CPU misses an advertised feature, we cannot proceed
* further, park the cpu.
*/
- if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ if (!__this_cpu_has_cap(caps_list, caps->capability)) {
pr_crit("CPU%d: missing feature: %s\n",
smp_processor_id(), caps->desc);
cpu_die_early();
}
if (caps->enable)
- caps->enable(NULL);
+ caps->enable((void *)caps);
}
}
@@ -1053,22 +1164,12 @@ static void __init setup_feature_capabilities(void)
enable_cpu_capabilities(arm64_features);
}
-/*
- * Check if the current CPU has a given feature capability.
- * Should be called from non-preemptible context.
- */
+extern const struct arm64_cpu_capabilities arm64_errata[];
+
bool this_cpu_has_cap(unsigned int cap)
{
- const struct arm64_cpu_capabilities *caps;
-
- if (WARN_ON(preemptible()))
- return false;
-
- for (caps = arm64_features; caps->desc; caps++)
- if (caps->capability == cap && caps->matches)
- return caps->matches(caps, SCOPE_LOCAL_CPU);
-
- return false;
+ return (__this_cpu_has_cap(arm64_features, cap) ||
+ __this_cpu_has_cap(arm64_errata, cap));
}
void __init setup_cpu_features(void)
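
Usage note for the KPTI hunks above: the new early parameter accepts strtobool
values, so "kpti=0" (or "kpti=off") on the kernel command line forces the
trampoline off even on CPUs that do not advertise CSV3, while "kpti=1" forces it
on (unless the Cavium 27456 workaround is active, which always disables it).
With no parameter, the choice falls back to CONFIG_RANDOMIZE_BASE, the
not-vulnerable MIDR list and finally the ID_AA64PFR0_EL1.CSV3 field. For example
(illustrative bootargs only):

    console=ttyAMA0 root=/dev/vda2 kpti=off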
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 5cf8aa8556f1..2bfe18d2e739 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -31,9 +31,12 @@
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
+#include <asm/mmu.h>
+#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
+#include <asm/kernel-pgtable.h>
/*
* Context tracking subsystem. Used to instrument transitions
@@ -70,8 +73,31 @@
#define BAD_FIQ 2
#define BAD_ERROR 3
- .macro kernel_entry, el, regsize = 64
+ .macro kernel_ventry, el, label, regsize = 64
+ .align 7
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+ .if \el == 0
+ .if \regsize == 64
+ mrs x30, tpidrro_el0
+ msr tpidrro_el0, xzr
+ .else
+ mov x30, xzr
+ .endif
+ .endif
+alternative_else_nop_endif
+#endif
+
sub sp, sp, #S_FRAME_SIZE
+ b el\()\el\()_\label
+ .endm
+
+ .macro tramp_alias, dst, sym
+ mov_q \dst, TRAMP_VALIAS
+ add \dst, \dst, #(\sym - .entry.tramp.text)
+ .endm
+
+ .macro kernel_entry, el, regsize = 64
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
.endif
@@ -104,7 +130,7 @@
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
- mov x20, #TASK_SIZE_64
+ mov x20, #USER_DS
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
@@ -211,18 +237,20 @@ alternative_else_nop_endif
.if \el == 0
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
+ tst x22, #PSR_MODE32_BIT // native task?
+ b.eq 3f
+
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
- tbz x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
mrs x29, contextidr_el1
msr contextidr_el1, x29
#else
msr contextidr_el1, xzr
#endif
-1:
alternative_else_nop_endif
#endif
+3:
.endif
msr elr_el1, x21 // set up the return data
@@ -244,7 +272,21 @@ alternative_else_nop_endif
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
add sp, sp, #S_FRAME_SIZE // restore sp
- eret // return to kernel
+
+ .if \el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ bne 4f
+ msr far_el1, x30
+ tramp_alias x30, tramp_exit_native
+ br x30
+4:
+ tramp_alias x30, tramp_exit_compat
+ br x30
+#endif
+ .else
+ eret
+ .endif
.endm
.macro irq_stack_entry
@@ -316,31 +358,31 @@ tsk .req x28 // current thread_info
.align 11
ENTRY(vectors)
- ventry el1_sync_invalid // Synchronous EL1t
- ventry el1_irq_invalid // IRQ EL1t
- ventry el1_fiq_invalid // FIQ EL1t
- ventry el1_error_invalid // Error EL1t
+ kernel_ventry 1, sync_invalid // Synchronous EL1t
+ kernel_ventry 1, irq_invalid // IRQ EL1t
+ kernel_ventry 1, fiq_invalid // FIQ EL1t
+ kernel_ventry 1, error_invalid // Error EL1t
- ventry el1_sync // Synchronous EL1h
- ventry el1_irq // IRQ EL1h
- ventry el1_fiq_invalid // FIQ EL1h
- ventry el1_error_invalid // Error EL1h
+ kernel_ventry 1, sync // Synchronous EL1h
+ kernel_ventry 1, irq // IRQ EL1h
+ kernel_ventry 1, fiq_invalid // FIQ EL1h
+ kernel_ventry 1, error_invalid // Error EL1h
- ventry el0_sync // Synchronous 64-bit EL0
- ventry el0_irq // IRQ 64-bit EL0
- ventry el0_fiq_invalid // FIQ 64-bit EL0
- ventry el0_error_invalid // Error 64-bit EL0
+ kernel_ventry 0, sync // Synchronous 64-bit EL0
+ kernel_ventry 0, irq // IRQ 64-bit EL0
+ kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
+ kernel_ventry 0, error_invalid // Error 64-bit EL0
#ifdef CONFIG_COMPAT
- ventry el0_sync_compat // Synchronous 32-bit EL0
- ventry el0_irq_compat // IRQ 32-bit EL0
- ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
- ventry el0_error_invalid_compat // Error 32-bit EL0
+ kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
+ kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
+ kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, error_invalid_compat, 32 // Error 32-bit EL0
#else
- ventry el0_sync_invalid // Synchronous 32-bit EL0
- ventry el0_irq_invalid // IRQ 32-bit EL0
- ventry el0_fiq_invalid // FIQ 32-bit EL0
- ventry el0_error_invalid // Error 32-bit EL0
+ kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
+ kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
+ kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
#endif
END(vectors)
@@ -612,13 +654,15 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ msr daifclr, #(8 | 4 | 1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
- bl do_mem_abort
+ bl do_el0_ia_bp_hardening
b ret_to_user
el0_fpsimd_acc:
/*
@@ -645,8 +689,10 @@ el0_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_dbg
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
ct_user_exit
mov x0, x26
mov x1, x25
@@ -705,6 +751,11 @@ el0_irq_naked:
#endif
ct_user_exit
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ tbz x22, #55, 1f
+ bl do_el0_irq_bp_hardening
+1:
+#endif
irq_handler
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -817,6 +868,7 @@ el0_svc_naked: // compat entry point
b.ne __sys_trace
cmp scno, sc_nr // check upper syscall limit
b.hs ni_sys
+ mask_nospec64 scno, sc_nr, x19 // enforce bounds for syscall number
ldr x16, [stbl, scno, lsl #3] // address in the syscall table
blr x16 // call sys_* routine
b ret_fast_syscall
@@ -865,6 +917,105 @@ __ni_sys_trace:
.popsection // .entry.text
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+ .pushsection ".entry.tramp.text", "ax"
+
+ .macro tramp_map_kernel, tmp
+ mrs \tmp, ttbr1_el1
+ sub \tmp, \tmp, #SWAPPER_DIR_SIZE
+ bic \tmp, \tmp, #USER_ASID_FLAG
+ msr ttbr1_el1, \tmp
+ .endm
+
+ .macro tramp_unmap_kernel, tmp
+ mrs \tmp, ttbr1_el1
+ add \tmp, \tmp, #SWAPPER_DIR_SIZE
+ orr \tmp, \tmp, #USER_ASID_FLAG
+ msr ttbr1_el1, \tmp
+ /*
+ * We avoid running the post_ttbr_update_workaround here because
+ * it's only needed by Cavium ThunderX, which requires KPTI to be
+ * disabled.
+ */
+ .endm
+
+ .macro tramp_ventry, regsize = 64
+ .align 7
+1:
+ .if \regsize == 64
+ msr tpidrro_el0, x30 // Restored in kernel_ventry
+ .endif
+ /*
+ * Defend against branch aliasing attacks by pushing a dummy
+ * entry onto the return stack and using a RET instruction to
+ * enter the full-fat kernel vectors.
+ */
+ bl 2f
+ b .
+2:
+ tramp_map_kernel x30
+#ifdef CONFIG_RANDOMIZE_BASE
+ adr x30, tramp_vectors + PAGE_SIZE
+ isb
+ ldr x30, [x30]
+#else
+ ldr x30, =vectors
+#endif
+ prfm plil1strm, [x30, #(1b - tramp_vectors)]
+ msr vbar_el1, x30
+ add x30, x30, #(1b - tramp_vectors)
+ isb
+ ret
+ .endm
+
+ .macro tramp_exit, regsize = 64
+ adr x30, tramp_vectors
+ msr vbar_el1, x30
+ tramp_unmap_kernel x30
+ .if \regsize == 64
+ mrs x30, far_el1
+ .endif
+ eret
+ .endm
+
+ .align 11
+ENTRY(tramp_vectors)
+ .space 0x400
+
+ tramp_ventry
+ tramp_ventry
+ tramp_ventry
+ tramp_ventry
+
+ tramp_ventry 32
+ tramp_ventry 32
+ tramp_ventry 32
+ tramp_ventry 32
+END(tramp_vectors)
+
+ENTRY(tramp_exit_native)
+ tramp_exit
+END(tramp_exit_native)
+
+ENTRY(tramp_exit_compat)
+ tramp_exit 32
+END(tramp_exit_compat)
+
+ .ltorg
+ .popsection // .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+ .pushsection ".rodata", "a"
+ .align PAGE_SHIFT
+ .globl __entry_tramp_data_start
+__entry_tramp_data_start:
+ .quad vectors
+ .popsection // .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
/*
* Special system call wrappers.
*/
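
Note on the el0_ia hunk above (illustrative, not part of this excerpt): the
instruction-abort path now branches to do_el0_ia_bp_hardening() instead of
do_mem_abort() so the branch predictor can be invalidated before interrupts are
re-enabled. That handler lives in arch/arm64/mm/fault.c and is not shown here;
assuming the shape of the upstream backport, it is roughly:

asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
						   unsigned int esr,
						   struct pt_regs *regs)
{
	/*
	 * A user instruction abort aimed at a kernel address is the
	 * classic branch-predictor-training pattern, so harden first,
	 * then fall through to the normal abort handling.
	 */
	if (addr > TASK_SIZE)
		arm64_apply_bp_hardening();

	local_irq_enable();
	do_mem_abort(addr, esr, regs);
}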
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c18658690993..d479185d29c9 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -474,7 +474,7 @@ ENDPROC(__primary_switched)
* end early head section, begin head code that is also used for
* hotplug and needs to have the same protections as the text region
*/
- .section ".idmap.text","ax"
+ .section ".idmap.text","awx"
ENTRY(kimage_vaddr)
.quad _text - TEXT_OFFSET
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 409abc45bdb6..1b3eb67edefb 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -175,8 +175,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
return NULL;
root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
- if (!root_ops)
+ if (!root_ops) {
+ kfree(ri);
return NULL;
+ }
ri->cfg = pci_acpi_setup_ecam_mapping(root);
if (!ri->cfg) {
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 57ae9d9ed9bb..199a23f058d5 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -871,15 +871,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
if (attr->exclude_idle)
return -EPERM;
- if (is_kernel_in_hyp_mode() &&
- attr->exclude_kernel != attr->exclude_hv)
- return -EINVAL;
+
+ /*
+ * If we're running in hyp mode, then we *are* the hypervisor.
+ * Therefore we ignore exclude_hv in this configuration, since
+ * there's no hypervisor to sample anyway. This is consistent
+ * with other architectures (x86 and Power).
+ */
+ if (is_kernel_in_hyp_mode()) {
+ if (!attr->exclude_kernel)
+ config_base |= ARMV8_PMU_INCLUDE_EL2;
+ } else {
+ if (attr->exclude_kernel)
+ config_base |= ARMV8_PMU_EXCLUDE_EL1;
+ if (!attr->exclude_hv)
+ config_base |= ARMV8_PMU_INCLUDE_EL2;
+ }
if (attr->exclude_user)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
- if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
- config_base |= ARMV8_PMU_EXCLUDE_EL1;
- if (!attr->exclude_hv)
- config_base |= ARMV8_PMU_INCLUDE_EL2;
/*
* Install the filter into config_base as this is used to
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 37ea9cf81c00..ede56a74bf9f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -307,17 +307,17 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
static void tls_thread_switch(struct task_struct *next)
{
- unsigned long tpidr, tpidrro;
+ unsigned long tpidr;
tpidr = read_sysreg(tpidr_el0);
*task_user_tls(current) = tpidr;
- tpidr = *task_user_tls(next);
- tpidrro = is_compat_thread(task_thread_info(next)) ?
- next->thread.tp_value : 0;
+ if (is_compat_thread(task_thread_info(next)))
+ write_sysreg(next->thread.tp_value, tpidrro_el0);
+ else if (!arm64_kernel_unmapped_at_el0())
+ write_sysreg(0, tpidrro_el0);
- write_sysreg(tpidr, tpidr_el0);
- write_sysreg(tpidrro, tpidrro_el0);
+ write_sysreg(*task_user_tls(next), tpidr_el0);
}
/* Restore the UAO state depending on next's addr_limit */
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index df67652e46f0..5a4fbcc744d2 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -95,7 +95,7 @@ ENTRY(__cpu_suspend_enter)
ret
ENDPROC(__cpu_suspend_enter)
- .pushsection ".idmap.text", "ax"
+ .pushsection ".idmap.text", "awx"
ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
bl __cpu_setup
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index f6c003e9edbf..2783ff837a24 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -51,7 +51,7 @@ static const char *handler[]= {
"Error"
};
-int show_unhandled_signals = 1;
+int show_unhandled_signals = 0;
/*
* Dump out the contents of some kernel memory nicely...
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index b8deffa9e1bf..34d3ed64fe8e 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -56,6 +56,17 @@ jiffies = jiffies_64;
#define HIBERNATE_TEXT
#endif
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_TEXT \
+ . = ALIGN(PAGE_SIZE); \
+ VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \
+ *(.entry.tramp.text) \
+ . = ALIGN(PAGE_SIZE); \
+ VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
+#else
+#define TRAMP_TEXT
+#endif
+
/*
* The size of the PE/COFF section that covers the kernel image, which
* runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -128,6 +139,7 @@ SECTIONS
HYPERVISOR_TEXT
IDMAP_TEXT
HIBERNATE_TEXT
+ TRAMP_TEXT
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
@@ -221,6 +233,11 @@ SECTIONS
. += RESERVED_TTBR0_SIZE;
#endif
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_pg_dir = .;
+ . += PAGE_SIZE;
+#endif
+
_end = .;
STABS_DEBUG
@@ -240,7 +257,10 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
-
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+ "Entry trampoline text too big")
+#endif
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 3f9e15722473..d3e0a2ffb383 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
@@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
- + NUM_TIMER_REGS;
+ + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
}
/**
@@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices++;
}
+ ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+ if (ret)
+ return ret;
+ uindices += kvm_arm_get_fw_num_regs(vcpu);
+
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
@@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+ return kvm_arm_get_fw_reg(vcpu, reg);
+
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+ return kvm_arm_set_fw_reg(vcpu, reg);
+
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 2e6e9e99977b..efe43c5f2dc1 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -22,12 +22,15 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <kvm/arm_psci.h>
+
#include <asm/esr.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
-#include <asm/kvm_psci.h>
+#include <asm/debug-monitors.h>
+#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -42,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_vcpu_hvc_get_imm(vcpu));
vcpu->stat.hvc_exit_stat++;
- ret = kvm_psci_call(vcpu);
+ ret = kvm_hvc_call_handler(vcpu);
if (ret < 0) {
vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
@@ -53,7 +56,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
+ /*
+ * "If an SMC instruction executed at Non-secure EL1 is
+ * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
+ * Trap exception, not a Secure Monitor Call exception [...]"
+ *
+ * We need to advance the PC after the trap, as it would
+ * otherwise return to the same address...
+ */
vcpu_set_reg(vcpu, 0, ~0UL);
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 4e92399f7105..4e9d50c3e658 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/alternative.h>
@@ -79,10 +80,11 @@ alternative_endif
lsr x0, x1, #ESR_ELx_EC_SHIFT
cmp x0, #ESR_ELx_EC_HVC64
+ ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
b.ne el1_trap
- mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest
- cbnz x1, el1_trap // called HVC
+ mrs x1, vttbr_el2 // If vttbr is valid, the guest
+ cbnz x1, el1_hvc_guest // called HVC
/* Here, we're pretty sure the host called HVC. */
ldp x0, x1, [sp], #16
@@ -101,6 +103,20 @@ alternative_endif
2: eret
+el1_hvc_guest:
+ /*
+ * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
+ * The workaround has already been applied on the host,
+ * so let's quickly get back to the guest. We don't bother
+ * restoring x1, as it can be clobbered anyway.
+ */
+ ldr x1, [sp] // Guest's x0
+ eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+ cbnz w1, el1_trap
+ mov x0, x1
+ add sp, sp, #16
+ eret
+
el1_trap:
/*
* x0: ESR_EC
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 0c848c18ca44..c49d09387192 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -17,6 +17,9 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+#include <uapi/linux/psci.h>
+
+#include <kvm/arm_psci.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
@@ -50,7 +53,7 @@ static void __hyp_text __activate_traps_vhe(void)
val &= ~CPACR_EL1_FPEN;
write_sysreg(val, cpacr_el1);
- write_sysreg(__kvm_hyp_vector, vbar_el1);
+ write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
static void __hyp_text __activate_traps_nvhe(void)
@@ -404,6 +407,7 @@ void __hyp_text __noreturn __hyp_panic(void)
vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ __timer_save_state(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
__sysreg_restore_host_state(host_ctxt);
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index d7150e30438a..72aa167cdaf2 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -21,7 +21,7 @@
.text
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
@@ -29,7 +29,7 @@
*
* Alignment fixed up by hardware.
*/
-ENTRY(__clear_user)
+ENTRY(__arch_clear_user)
uaccess_enable_not_uao x2, x3
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
@@ -52,7 +52,7 @@ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
uaccess_disable_not_uao x2
ret
-ENDPROC(__clear_user)
+ENDPROC(__arch_clear_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 718b1c4e2f85..984570b0bd20 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -64,14 +64,14 @@
.endm
end .req x5
-ENTRY(__copy_in_user)
+ENTRY(__arch_copy_in_user)
uaccess_enable_not_uao x3, x4
add end, x0, x2
#include "copy_template.S"
uaccess_disable_not_uao x3
mov x0, #0
ret
-ENDPROC(__copy_in_user)
+ENDPROC(__arch_copy_in_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 4c63cb154859..c841ccecbc4d 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending;
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
-#define NUM_USER_ASIDS ASID_FIRST_VERSION
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1)
+#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1)
+#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK)
+#else
+#define NUM_USER_ASIDS (ASID_FIRST_VERSION)
+#define asid2idx(asid) ((asid) & ~ASID_MASK)
+#define idx2asid(idx) asid2idx(idx)
+#endif
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
@@ -104,7 +113,7 @@ static void flush_context(unsigned int cpu)
*/
if (asid == 0)
asid = per_cpu(reserved_asids, i);
- __set_bit(asid & ~ASID_MASK, asid_map);
+ __set_bit(asid2idx(asid), asid_map);
per_cpu(reserved_asids, i) = asid;
}
@@ -159,16 +168,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
*/
- asid &= ~ASID_MASK;
- if (!__test_and_set_bit(asid, asid_map))
+ if (!__test_and_set_bit(asid2idx(asid), asid_map))
return newasid;
}
/*
* Allocate a free ASID. If we can't find one, take a note of the
- * currently active ASIDs and mark the TLBs as requiring flushes.
- * We always count from ASID #1, as we use ASID #0 when setting a
- * reserved TTBR0 for the init_mm.
+ * currently active ASIDs and mark the TLBs as requiring flushes. We
+ * always count from ASID #2 (index 1), as we use ASID #0 when setting
+ * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+ * pairs.
*/
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
if (asid != NUM_USER_ASIDS)
@@ -185,7 +194,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
- return asid | generation;
+ return idx2asid(asid) | generation;
}
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
@@ -221,6 +230,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
+
+ arm64_apply_bp_hardening();
+
/*
* Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
* emulating PAN.
@@ -229,6 +241,15 @@ switch_mm_fastpath:
cpu_switch_mm(mm->pgd, mm);
}
+/* Errata workaround post TTBRx_EL1 update. */
+asmlinkage void post_ttbr_update_workaround(void)
+{
+ asm(ALTERNATIVE("nop; nop; nop",
+ "ic iallu; dsb nsh; isb",
+ ARM64_WORKAROUND_CAVIUM_27456,
+ CONFIG_CAVIUM_ERRATUM_27456));
+}
+
static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
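Context for the asid2idx()/idx2asid() macros introduced above: with CONFIG_UNMAP_KERNEL_AT_EL0 the allocator hands out only even ASIDs, so every task implicitly owns an even/odd pair and the odd sibling stays free for the other half of the unmapped-kernel switch. A small host-side sketch of the same arithmetic (constants assume 16 ASID bits; this is illustrative only, not kernel code):

/* Illustrative only: the even/odd ASID pairing used when
 * CONFIG_UNMAP_KERNEL_AT_EL0 is enabled, as a standalone host program. */
#include <stdio.h>

#define ASID_BITS          16
#define ASID_FIRST_VERSION (1UL << ASID_BITS)
#define ASID_MASK          (~(ASID_FIRST_VERSION - 1))
#define NUM_USER_ASIDS     (ASID_FIRST_VERSION >> 1)

#define asid2idx(asid)     (((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)      (((idx) << 1) & ~ASID_MASK)

int main(void)
{
        /* Bitmap index n hands out ASID 2n; ASID 2n+1 stays reserved
         * for the other half of the pair. */
        for (unsigned long idx = 1; idx <= 4; idx++)
                printf("idx %lu -> asid %lu (sibling %lu reserved)\n",
                       idx, idx2asid(idx), idx2asid(idx) + 1);

        /* Round-trip: recover the bitmap index from an ASID. */
        printf("asid2idx(6) = %lu\n", asid2idx(6UL));
        return 0;
}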
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index cf4e3f53a1e3..e4e7b1991985 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -338,7 +338,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < USER_DS && is_permission_fault(esr, regs)) {
+ if (addr < TASK_SIZE && is_permission_fault(esr, regs)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
@@ -596,6 +596,29 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die("", regs, &info, esr);
}
+asmlinkage void __exception do_el0_irq_bp_hardening(void)
+{
+ /* PC has already been checked in entry.S */
+ arm64_apply_bp_hardening();
+}
+
+asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+ unsigned int esr,
+ struct pt_regs *regs)
+{
+ /*
+ * We've taken an instruction abort from userspace and not yet
+ * re-enabled IRQs. If the address is a kernel address, apply
+ * BP hardening prior to enabling IRQs and pre-emption.
+ */
+ if (addr > TASK_SIZE)
+ arm64_apply_bp_hardening();
+
+ local_irq_enable();
+ do_mem_abort(addr, esr, regs);
+}
+
+
/*
* Handle stack alignment exceptions.
*/
@@ -606,6 +629,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct siginfo info;
struct task_struct *tsk = current;
+ if (user_mode(regs)) {
+ if (instruction_pointer(regs) > TASK_SIZE)
+ arm64_apply_bp_hardening();
+ local_irq_enable();
+ }
+
if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
tsk->comm, task_pid_nr(tsk),
@@ -665,6 +694,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
if (interrupts_enabled(regs))
trace_hardirqs_off();
+ if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+ arm64_apply_bp_hardening();
+
if (!inf->fn(addr, esr, regs)) {
rv = 1;
} else {
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 01c171723bb3..caf75abf6ae7 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -18,6 +18,7 @@
#include <linux/elf.h>
#include <linux/fs.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
@@ -102,12 +103,18 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
*/
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
- if (addr < PHYS_OFFSET)
- return 0;
- if (addr + size > __pa(high_memory - 1) + 1)
- return 0;
-
- return 1;
+ /*
+ * Check whether addr is covered by a memory region without the
+ * MEMBLOCK_NOMAP attribute, and whether that region covers the
+ * entire range. In theory, this could lead to false negatives
+ * if the range is covered by distinct but adjacent memory regions
+ * that only differ in other attributes. However, few of such
+ * attributes have been defined, and it is debatable whether it
+ * follows that /dev/mem read() calls should be able to traverse
+ * such boundaries.
+ */
+ return memblock_is_region_memory(addr, size) &&
+ memblock_is_map_memory(addr);
}
/*
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f1f1d64d0715..5b1184c369d5 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -422,6 +422,37 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
vm_area_add_early(vma);
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __init map_entry_trampoline(void)
+{
+ extern char __entry_tramp_text_start[];
+
+ pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+ phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+
+ /* The trampoline is always mapped and can therefore be global */
+ pgprot_val(prot) &= ~PTE_NG;
+
+ /* Map only the text into the trampoline page table */
+ memset(tramp_pg_dir, 0, PGD_SIZE);
+ __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+ prot, pgd_pgtable_alloc, 0);
+
+ /* Map both the text and data into the kernel page table */
+ __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ extern char __entry_tramp_data_start[];
+
+ __set_fixmap(FIX_ENTRY_TRAMP_DATA,
+ __pa_symbol(__entry_tramp_data_start),
+ PAGE_KERNEL_RO);
+ }
+
+ return 0;
+}
+core_initcall(map_entry_trampoline);
+#endif
+
/*
* Create fine-grained mappings for the kernel.
*/
@@ -775,3 +806,13 @@ int pmd_clear_huge(pmd_t *pmd)
pmd_clear(pmd);
return 1;
}
+
+int pud_free_pmd_page(pud_t *pud)
+{
+ return pud_none(*pud);
+}
+
+int pmd_free_pte_page(pmd_t *pmd)
+{
+ return pmd_none(*pmd);
+}
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 32682be978e0..9d37f4e61166 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -86,7 +86,7 @@ ENDPROC(cpu_do_suspend)
*
* x0: Address of context pointer
*/
- .pushsection ".idmap.text", "ax"
+ .pushsection ".idmap.text", "awx"
ENTRY(cpu_do_resume)
ldp x2, x3, [x0]
ldp x4, x5, [x0, #16]
@@ -138,15 +138,27 @@ ENDPROC(cpu_do_resume)
* - pgd_phys - physical address of new TTB
*/
ENTRY(cpu_do_switch_mm)
+ mrs x2, ttbr1_el1
mmid x1, x1 // get mm->context.id
- bfi x0, x1, #48, #16 // set the ASID
- msr ttbr0_el1, x0 // set TTBR0
+ bfi x2, x1, #48, #16 // set the ASID
+ msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set)
isb
- post_ttbr0_update_workaround
- ret
+ msr ttbr0_el1, x0 // now update TTBR0
+ isb
+ b post_ttbr_update_workaround // Back to C code...
ENDPROC(cpu_do_switch_mm)
- .pushsection ".idmap.text", "ax"
+ .pushsection ".idmap.text", "awx"
+
+.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
+ adrp \tmp1, empty_zero_page
+ msr ttbr1_el1, \tmp1
+ isb
+ tlbi vmalle1
+ dsb nsh
+ isb
+.endm
+
/*
* void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
*
@@ -157,13 +169,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
mrs x2, daif
msr daifset, #0xf
- adrp x1, empty_zero_page
- msr ttbr1_el1, x1
- isb
-
- tlbi vmalle1
- dsb nsh
- isb
+ __idmap_cpu_set_reserved_ttbr1 x1, x3
msr ttbr1_el1, x0
isb
@@ -174,13 +180,196 @@ ENTRY(idmap_cpu_replace_ttbr1)
ENDPROC(idmap_cpu_replace_ttbr1)
.popsection
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ .pushsection ".idmap.text", "awx"
+
+ .macro __idmap_kpti_get_pgtable_ent, type
+ dc cvac, cur_\()\type\()p // Ensure any existing dirty
+ dmb sy // lines are written back before
+ ldr \type, [cur_\()\type\()p] // loading the entry
+ tbz \type, #0, next_\()\type // Skip invalid entries
+ .endm
+
+ .macro __idmap_kpti_put_pgtable_ent_ng, type
+ orr \type, \type, #PTE_NG // Same bit for blocks and pages
+ str \type, [cur_\()\type\()p] // Update the entry and ensure it
+ dc civac, cur_\()\type\()p // is visible to all CPUs.
+ .endm
+
+/*
+ * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
+ *
+ * Called exactly once from stop_machine context by each CPU found during boot.
+ */
+__idmap_kpti_flag:
+ .long 1
+ENTRY(idmap_kpti_install_ng_mappings)
+ cpu .req w0
+ num_cpus .req w1
+ swapper_pa .req x2
+ swapper_ttb .req x3
+ flag_ptr .req x4
+ cur_pgdp .req x5
+ end_pgdp .req x6
+ pgd .req x7
+ cur_pudp .req x8
+ end_pudp .req x9
+ pud .req x10
+ cur_pmdp .req x11
+ end_pmdp .req x12
+ pmd .req x13
+ cur_ptep .req x14
+ end_ptep .req x15
+ pte .req x16
+
+ mrs swapper_ttb, ttbr1_el1
+ adr flag_ptr, __idmap_kpti_flag
+
+ cbnz cpu, __idmap_kpti_secondary
+
+ /* We're the boot CPU. Wait for the others to catch up */
+ sevl
+1: wfe
+ ldaxr w18, [flag_ptr]
+ eor w18, w18, num_cpus
+ cbnz w18, 1b
+
+ /* We need to walk swapper, so turn off the MMU. */
+ mrs x18, sctlr_el1
+ bic x18, x18, #SCTLR_ELx_M
+ msr sctlr_el1, x18
+ isb
+
+ /* Everybody is enjoying the idmap, so we can rewrite swapper. */
+ /* PGD */
+ mov cur_pgdp, swapper_pa
+ add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
+do_pgd: __idmap_kpti_get_pgtable_ent pgd
+ tbnz pgd, #1, walk_puds
+ __idmap_kpti_put_pgtable_ent_ng pgd
+next_pgd:
+ add cur_pgdp, cur_pgdp, #8
+ cmp cur_pgdp, end_pgdp
+ b.ne do_pgd
+
+ /* Publish the updated tables and nuke all the TLBs */
+ dsb sy
+ tlbi vmalle1is
+ dsb ish
+ isb
+
+ /* We're done: fire up the MMU again */
+ mrs x18, sctlr_el1
+ orr x18, x18, #SCTLR_ELx_M
+ msr sctlr_el1, x18
+ isb
+
+ /* Set the flag to zero to indicate that we're all done */
+ str wzr, [flag_ptr]
+ ret
+
+ /* PUD */
+walk_puds:
+ .if CONFIG_PGTABLE_LEVELS > 3
+ pte_to_phys cur_pudp, pgd
+ add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
+do_pud: __idmap_kpti_get_pgtable_ent pud
+ tbnz pud, #1, walk_pmds
+ __idmap_kpti_put_pgtable_ent_ng pud
+next_pud:
+ add cur_pudp, cur_pudp, 8
+ cmp cur_pudp, end_pudp
+ b.ne do_pud
+ b next_pgd
+ .else /* CONFIG_PGTABLE_LEVELS <= 3 */
+ mov pud, pgd
+ b walk_pmds
+next_pud:
+ b next_pgd
+ .endif
+
+ /* PMD */
+walk_pmds:
+ .if CONFIG_PGTABLE_LEVELS > 2
+ pte_to_phys cur_pmdp, pud
+ add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
+do_pmd: __idmap_kpti_get_pgtable_ent pmd
+ tbnz pmd, #1, walk_ptes
+ __idmap_kpti_put_pgtable_ent_ng pmd
+next_pmd:
+ add cur_pmdp, cur_pmdp, #8
+ cmp cur_pmdp, end_pmdp
+ b.ne do_pmd
+ b next_pud
+ .else /* CONFIG_PGTABLE_LEVELS <= 2 */
+ mov pmd, pud
+ b walk_ptes
+next_pmd:
+ b next_pud
+ .endif
+
+ /* PTE */
+walk_ptes:
+ pte_to_phys cur_ptep, pmd
+ add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
+do_pte: __idmap_kpti_get_pgtable_ent pte
+ __idmap_kpti_put_pgtable_ent_ng pte
+next_pte:
+ add cur_ptep, cur_ptep, #8
+ cmp cur_ptep, end_ptep
+ b.ne do_pte
+ b next_pmd
+
+ /* Secondary CPUs end up here */
+__idmap_kpti_secondary:
+ /* Uninstall swapper before surgery begins */
+ __idmap_cpu_set_reserved_ttbr1 x18, x17
+
+ /* Increment the flag to let the boot CPU know we're ready */
+1: ldxr w18, [flag_ptr]
+ add w18, w18, #1
+ stxr w17, w18, [flag_ptr]
+ cbnz w17, 1b
+
+ /* Wait for the boot CPU to finish messing around with swapper */
+ sevl
+1: wfe
+ ldxr w18, [flag_ptr]
+ cbnz w18, 1b
+
+ /* All done, act like nothing happened */
+ msr ttbr1_el1, swapper_ttb
+ isb
+ ret
+
+ .unreq cpu
+ .unreq num_cpus
+ .unreq swapper_pa
+ .unreq swapper_ttb
+ .unreq flag_ptr
+ .unreq cur_pgdp
+ .unreq end_pgdp
+ .unreq pgd
+ .unreq cur_pudp
+ .unreq end_pudp
+ .unreq pud
+ .unreq cur_pmdp
+ .unreq end_pmdp
+ .unreq pmd
+ .unreq cur_ptep
+ .unreq end_ptep
+ .unreq pte
+ENDPROC(idmap_kpti_install_ng_mappings)
+ .popsection
+#endif
+
/*
* __cpu_setup
*
* Initialise the processor for turning the MMU on. Return in x0 the
* value of the SCTLR_EL1 register.
*/
- .pushsection ".idmap.text", "ax"
+ .pushsection ".idmap.text", "awx"
ENTRY(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB
dsb nsh
@@ -224,7 +413,7 @@ ENTRY(__cpu_setup)
* both user and kernel.
*/
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
- TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+ TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
tcr_set_idmap_t0sz x10, x9
/*
@@ -242,6 +431,11 @@ ENTRY(__cpu_setup)
cbz x9, 2f
cmp x9, #2
b.lt 1f
+#ifdef CONFIG_ARM64_ERRATUM_1024718
+ /* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */
+ cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4
+ cbnz x1, 1f
+#endif
orr x10, x10, #TCR_HD // hardware Dirty flag update
1: orr x10, x10, #TCR_HA // hardware Access flag update
2:
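The idmap_kpti_install_ng_mappings() routine added above coordinates CPUs with nothing more than the __idmap_kpti_flag word: secondaries park on the reserved TTBR1, increment the flag, and spin until it drops back to zero, while the boot CPU waits for the count to reach num_cpus, rewrites swapper with nG set, and then clears the flag. A rough C model of just that handshake (thread layout and names are invented; the page-table walk itself is omitted):

/* Sketch of the boot/secondary CPU rendezvous only; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_int flag = 1;                    /* matches the initial __idmap_kpti_flag */

static void secondary_cpu(void)
{
        atomic_fetch_add(&flag, 1);            /* "I'm parked on the idmap" */
        while (atomic_load(&flag) != 0)        /* wait for the boot CPU */
                ;
}

static void boot_cpu(void)
{
        while (atomic_load(&flag) != NR_CPUS)  /* wait for all secondaries */
                ;
        /* ...rewrite swapper with nG set, flush the TLBs... */
        atomic_store(&flag, 0);                /* release the secondaries */
}

static void *cpu_thread(void *arg)
{
        if ((long)arg == 0)
                boot_cpu();
        else
                secondary_cpu();
        return NULL;
}

int main(void)
{
        pthread_t t[NR_CPUS];

        for (long i = 0; i < NR_CPUS; i++)
                pthread_create(&t[i], NULL, cpu_thread, (void *)i);
        for (int i = 0; i < NR_CPUS; i++)
                pthread_join(t[i], NULL);
        printf("all CPUs rejoined swapper\n");
        return 0;
}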
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index d8199e12fb6e..b47a26f4290c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -234,8 +234,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
off = offsetof(struct bpf_array, map.max_entries);
emit_a64_mov_i64(tmp, off, ctx);
emit(A64_LDR32(tmp, r2, tmp), ctx);
+ emit(A64_MOV(0, r3, r3), ctx);
emit(A64_CMP(0, r3, tmp), ctx);
- emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
+ emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
@@ -243,7 +244,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
*/
emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
emit(A64_CMP(1, tcc, tmp), ctx);
- emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
+ emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
/* prog = array->ptrs[index];
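The two condition-code changes above (GE becomes CS, GT becomes HI) and the extra 32-bit self-move of r3 exist because the tail-call index is an unsigned 32-bit value: the mov clears its upper half, and the bounds checks must be unsigned or an index with bit 31 set would sneak past them. A plain C illustration of the signed-versus-unsigned difference (not the JIT itself):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t index = 0x80000000u;   /* attacker-controlled tail-call index */
        uint32_t max_entries = 16;

        /* Signed view: 0x80000000 is a large negative number, so the
         * "index >= max_entries" check wrongly passes the bounds test. */
        printf("signed   in-bounds? %d\n",
               !((int32_t)index >= (int32_t)max_entries));

        /* Unsigned view (what A64_COND_CS implements): correctly rejected. */
        printf("unsigned in-bounds? %d\n", !(index >= max_entries));
        return 0;
}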
diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h
index 4bea27f50a7a..2702bd802d44 100644
--- a/arch/frv/include/asm/futex.h
+++ b/arch/frv/include/asm/futex.h
@@ -7,7 +7,8 @@
#include <asm/errno.h>
#include <asm/uaccess.h>
-extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
+extern int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr);
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h
index a89bddefdacf..139093fab326 100644
--- a/arch/frv/include/asm/timex.h
+++ b/arch/frv/include/asm/timex.h
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
#define vxtime_lock() do {} while (0)
#define vxtime_unlock() do {} while (0)
+/* This attribute is used in include/linux/jiffies.h alongside with
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data __attribute__((__section__(".data")))
+
#endif
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index d155ca9e5098..37f7b2bf7f73 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -186,20 +186,10 @@ static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_o
/*
* do the futex operations
*/
-int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -225,18 +215,9 @@ int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS; break;
- }
- }
+ if (!ret)
+ *oval = oldval;
return ret;
-} /* end futex_atomic_op_inuser() */
+} /* end arch_futex_atomic_op_inuser() */
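This frv conversion is the first of a long series below (hexagon, ia64, microblaze, mips, parisc, powerpc, ...) that all follow the same shape: the architecture now only performs the atomic operation and reports the old value through *oval, while the encoded_op decoding, the access_ok() check and the final comparison move into a single shared caller. Roughly, that caller behaves like the following user-space model (simplified to SET/ADD ops with no access checks; the names mirror the kernel's but the bodies are stand-ins, not the kernel sources):

/* User-space model: decoding and the final comparison happen once in
 * common code, only the atomic op is per-architecture. */
#include <stdint.h>
#include <stdio.h>

enum { FUTEX_OP_SET, FUTEX_OP_ADD };
enum { FUTEX_OP_CMP_EQ, FUTEX_OP_CMP_NE, FUTEX_OP_CMP_LT,
       FUTEX_OP_CMP_GE, FUTEX_OP_CMP_LE, FUTEX_OP_CMP_GT };

/* Stand-in for the per-arch helper: do the op, hand back the old value. */
static int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
                                       uint32_t *uaddr)
{
        int old = (int)*uaddr;

        switch (op) {
        case FUTEX_OP_SET: *uaddr = oparg;       break;
        case FUTEX_OP_ADD: *uaddr = old + oparg; break;
        default:           return -1;
        }
        *oval = old;
        return 0;
}

/* What the single shared caller now does for every architecture. */
static int futex_atomic_op_inuser(unsigned int encoded_op, uint32_t *uaddr)
{
        int op     = (encoded_op >> 28) & 7;
        int cmp    = (encoded_op >> 24) & 15;
        int oparg  = ((int)(encoded_op << 8)) >> 20;   /* sign-extended */
        int cmparg = ((int)(encoded_op << 20)) >> 20;  /* sign-extended */
        int oldval, ret;

        ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
        if (ret)
                return ret;

        switch (cmp) {
        case FUTEX_OP_CMP_EQ: return oldval == cmparg;
        case FUTEX_OP_CMP_NE: return oldval != cmparg;
        case FUTEX_OP_CMP_LT: return oldval <  cmparg;
        case FUTEX_OP_CMP_GE: return oldval >= cmparg;
        case FUTEX_OP_CMP_LE: return oldval <= cmparg;
        case FUTEX_OP_CMP_GT: return oldval >  cmparg;
        }
        return -1;
}

int main(void)
{
        uint32_t word = 5;
        /* op=ADD, cmp=GT, oparg=3, cmparg=4: add 3, then test old(5) > 4. */
        unsigned int encoded_op = (FUTEX_OP_ADD << 28) | (FUTEX_OP_CMP_GT << 24)
                                  | (3 << 12) | 4;

        printf("cmp result=%d, word is now %u\n",
               futex_atomic_op_inuser(encoded_op, &word), word);
        return 0;
}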
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index 7e597f8434da..c607b77c8215 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -31,18 +31,9 @@
static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
pagefault_disable();
@@ -72,30 +63,9 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 76acbcd5c060..6d67dc1eaf2b 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -45,18 +45,9 @@ do { \
} while (0)
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -84,17 +75,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 6ab0ae7d6535..d1d945c6bd05 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -153,7 +153,7 @@ slot (const struct insn *insn)
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
- if (slot(insn) != 2) {
+ if (slot(insn) != 1 && slot(insn) != 2) {
printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
mod->name, slot(insn));
return 0;
@@ -165,7 +165,7 @@ apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
- if (slot(insn) != 2) {
+ if (slot(insn) != 1 && slot(insn) != 2) {
printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
mod->name, slot(insn));
return 0;
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 01848f056f43..a9dad9e5e132 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -29,18 +29,9 @@
})
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -66,30 +57,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index e17c430bfee2..faae20d9b463 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -95,6 +95,7 @@ config MIPS_GENERIC
select PCI_DRIVERS_GENERIC
select PINCTRL
select SMP_UP if SMP
+ select SWAP_IO_SPACE
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_CPU_MIPS32_R6
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
index 9ab48ff80c1c..6d11ae581ea7 100644
--- a/arch/mips/ath25/board.c
+++ b/arch/mips/ath25/board.c
@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
}
board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
+ if (!board_data)
+ goto error;
ath25_board.config = (struct ath25_boarddata *)board_data;
memcpy_fromio(board_data, bcfg, 0x100);
if (broken_boarddata) {
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index c1eb1ff7c800..6ed1ded87b8f 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
}
host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+ if (!host_data)
+ return -ENOMEM;
raw_spin_lock_init(&host_data->lock);
addr = of_get_address(ciu_node, 0, NULL, NULL);
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 1de190bdfb9c..a9e61ea54ca9 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -83,18 +83,9 @@
}
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -125,17 +116,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index daba1f9a4f79..174aedce3167 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -40,7 +40,8 @@ typedef union mips_instruction kprobe_opcode_t;
#define flush_insn_slot(p) \
do { \
- flush_icache_range((unsigned long)p->addr, \
+ if (p->addr) \
+ flush_icache_range((unsigned long)p->addr, \
(unsigned long)p->addr + \
(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
} while (0)
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index d21f3da7bdb6..c0be540e83cb 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -18,6 +18,10 @@
#include <asm-generic/pgtable-nopmd.h>
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
extern int temp_tlb_entry;
/*
@@ -61,7 +65,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
#define VMALLOC_START MAP_BASE
-#define PKMAP_BASE (0xfe000000UL)
+#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
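The effect of the new PKMAP_END/PKMAP_BASE definitions above is that the permanent-kmap window is no longer pinned at 0xfe000000 but sits immediately below the fixmap, aligned to the size of the window itself. A quick illustration of the arithmetic (the FIXADDR_START, LAST_PKMAP and PAGE_SHIFT values here are assumed purely for the example):

#include <stdio.h>

int main(void)
{
        /* Assumed values, for illustration only. */
        unsigned long FIXADDR_START = 0xffee0000UL;
        unsigned long LAST_PKMAP = 1024;            /* pkmap slots */
        unsigned long PAGE_SHIFT = 12, PAGE_SIZE = 1UL << PAGE_SHIFT;

        unsigned long PKMAP_END  = FIXADDR_START & ~((LAST_PKMAP << PAGE_SHIFT) - 1);
        unsigned long PKMAP_BASE = PKMAP_END - PAGE_SIZE * LAST_PKMAP;

        /* The pkmap window is LAST_PKMAP pages, aligned on its own size,
         * placed just below the fixmap instead of at a fixed address. */
        printf("PKMAP_BASE=0x%lx PKMAP_END=0x%lx\n", PKMAP_BASE, PKMAP_END);
        return 0;
}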
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 89fa5c0b1579..c92f4c28db7f 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -1257,6 +1257,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
{
__kernel_size_t res;
+#ifdef CONFIG_CPU_MICROMIPS
+/* micromips memset / bzero also clobbers t7 & t8 */
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
+#else
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
+#endif /* CONFIG_CPU_MICROMIPS */
+
if (eva_kernel_access()) {
__asm__ __volatile__(
"move\t$4, %1\n\t"
@@ -1266,7 +1273,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
- : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+ : bzero_clobbers);
} else {
might_fault();
__asm__ __volatile__(
@@ -1277,7 +1284,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
- : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+ : bzero_clobbers);
}
return res;
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index d8227f289d7f..9a4aed652736 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1096,10 +1096,20 @@ repeat:
}
break;
- case beql_op:
- case bnel_op:
case blezl_op:
case bgtzl_op:
+ /*
+ * For BLEZL and BGTZL, rt field must be set to 0. If this
+ * is not the case, this may be an encoding of a MIPS R6
+ * instruction, so return to CPU execution if this occurs
+ */
+ if (MIPSInst_RT(inst)) {
+ err = SIGILL;
+ break;
+ }
+ /* fall through */
+ case beql_op:
+ case bnel_op:
if (delay_slot(regs)) {
err = SIGILL;
break;
@@ -2329,6 +2339,8 @@ static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
__this_cpu_write((mipsr2bremustats).bgezl, 0);
__this_cpu_write((mipsr2bremustats).bltzll, 0);
__this_cpu_write((mipsr2bremustats).bgezll, 0);
+ __this_cpu_write((mipsr2bremustats).bltzall, 0);
+ __this_cpu_write((mipsr2bremustats).bgezall, 0);
__this_cpu_write((mipsr2bremustats).bltzal, 0);
__this_cpu_write((mipsr2bremustats).bgezal, 0);
__this_cpu_write((mipsr2bremustats).beql, 0);
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 47c9646f93b3..d4a293b68249 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -166,11 +166,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
return;
}
- if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
- "smp_ipi0", NULL))
+ if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
panic("Can't request IPI0 interrupt");
- if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
- "smp_ipi1", NULL))
+ if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
panic("Can't request IPI1 interrupt");
}
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 0344e575f522..fba4ca56e46a 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -15,4 +15,5 @@ obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
# libgcc-style stuff needed in the kernel
-obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o
+obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \
+ ucmpdi2.o
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
index 05909d58e2fe..56ea0df60a44 100644
--- a/arch/mips/lib/libgcc.h
+++ b/arch/mips/lib/libgcc.h
@@ -9,10 +9,18 @@ typedef int word_type __attribute__ ((mode (__word__)));
struct DWstruct {
int high, low;
};
+
+struct TWstruct {
+ long long high, low;
+};
#elif defined(__LITTLE_ENDIAN)
struct DWstruct {
int low, high;
};
+
+struct TWstruct {
+ long long low, high;
+};
#else
#error I feel sick.
#endif
@@ -22,4 +30,13 @@ typedef union {
long long ll;
} DWunion;
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6)
+typedef int ti_type __attribute__((mode(TI)));
+
+typedef union {
+ struct TWstruct s;
+ ti_type ti;
+} TWunion;
+#endif
+
#endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 18a1ccd4d134..2b1bf93b5c80 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -218,7 +218,7 @@
1: PTR_ADDIU a0, 1 /* fill bytewise */
R10KCBARRIER(0(ra))
bne t1, a0, 1b
- sb a1, -1(a0)
+ EX(sb, a1, -1(a0), .Lsmall_fixup\@)
2: jr ra /* done */
move a2, zero
@@ -251,13 +251,18 @@
PTR_L t0, TI_TASK($28)
andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
- LONG_ADDU a2, t1
+ LONG_ADDU a2, a0
jr ra
LONG_SUBU a2, t0
.Llast_fixup\@:
jr ra
- andi v1, a2, STORMASK
+ nop
+
+.Lsmall_fixup\@:
+ PTR_SUBU a2, t1, a0
+ jr ra
+ PTR_ADDIU a2, 1
.endm
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
new file mode 100644
index 000000000000..111ad475aa0c
--- /dev/null
+++ b/arch/mips/lib/multi3.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+
+#include "libgcc.h"
+
+/*
+ * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
+ * specific case only we'll implement it here.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
+ */
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+
+/* multiply 64-bit values, low 64-bits returned */
+static inline long long notrace dmulu(long long a, long long b)
+{
+ long long res;
+
+ asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+ return res;
+}
+
+/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */
+static inline long long notrace dmuhu(long long a, long long b)
+{
+ long long res;
+
+ asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+ return res;
+}
+
+/* multiply 128-bit values, low 128-bits returned */
+ti_type notrace __multi3(ti_type a, ti_type b)
+{
+ TWunion res, aa, bb;
+
+ aa.ti = a;
+ bb.ti = b;
+
+ /*
+ * a * b = (a.lo * b.lo)
+ * + 2^64 * (a.hi * b.lo + a.lo * b.hi)
+ * [+ 2^128 * (a.hi * b.hi)]
+ */
+ res.s.low = dmulu(aa.s.low, bb.s.low);
+ res.s.high = dmuhu(aa.s.low, bb.s.low);
+ res.s.high += dmulu(aa.s.high, bb.s.low);
+ res.s.high += dmulu(aa.s.low, bb.s.high);
+
+ return res.ti;
+}
+EXPORT_SYMBOL(__multi3);
+
+#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
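The decomposition in the comment above — low and high 64-bit partial products, with the 2^128 term dropped because only the low 128 bits are returned — can be checked on any host compiler that provides __int128 (assuming GCC or Clang on a 64-bit target; this is a host-side check, not kernel code):

#include <stdio.h>

typedef unsigned __int128 u128;

static u128 multi3_by_parts(unsigned long long alo, unsigned long long ahi,
                            unsigned long long blo, unsigned long long bhi)
{
        unsigned long long lo = alo * blo;               /* like dmulu */
        unsigned long long hi = (u128)alo * blo >> 64;   /* like dmuhu */

        hi += ahi * blo;        /* cross terms only affect the high half */
        hi += alo * bhi;
        return ((u128)hi << 64) | lo;
}

int main(void)
{
        u128 a = ((u128)0x0123456789abcdefULL << 64) | 0xfedcba9876543210ULL;
        u128 b = ((u128)0x0f0e0d0c0b0a0908ULL << 64) | 0x0706050403020100ULL;
        u128 want = a * b;
        u128 got = multi3_by_parts((unsigned long long)a,
                                   (unsigned long long)(a >> 64),
                                   (unsigned long long)b,
                                   (unsigned long long)(b >> 64));

        printf("%s\n", want == got ? "match" : "MISMATCH");
        return 0;
}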
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911ba748..b19a3c506b1e 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -51,15 +51,15 @@ void __init pagetable_init(void)
/*
* Fixed mappings:
*/
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+ fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
#ifdef CONFIG_HIGHMEM
/*
* Permanent kmaps:
*/
vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+ fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
pgd = swapper_pg_dir + __pgd_offset(vaddr);
pud = pud_offset(pgd, vaddr);
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 49a2e2226fee..248603739198 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -526,7 +526,8 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
u32 sflags, tmp_flags;
/* Adjust the stack pointer */
- emit_stack_offset(-align_sp(offset), ctx);
+ if (offset)
+ emit_stack_offset(-align_sp(offset), ctx);
tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
/* sflags is essentially a bitmap */
@@ -578,7 +579,8 @@ static void restore_bpf_jit_regs(struct jit_ctx *ctx,
emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
/* Restore the sp and discard the scrach memory */
- emit_stack_offset(align_sp(offset), ctx);
+ if (offset)
+ emit_stack_offset(align_sp(offset), ctx);
}
static unsigned int get_stack_depth(struct jit_ctx *ctx)
@@ -625,8 +627,14 @@ static void build_prologue(struct jit_ctx *ctx)
if (ctx->flags & SEEN_X)
emit_jit_reg_move(r_X, r_zero, ctx);
- /* Do not leak kernel data to userspace */
- if (bpf_needs_clear_a(&ctx->skf->insns[0]))
+ /*
+ * Do not leak kernel data to userspace, we only need to clear
+ * r_A if it is ever used. In fact if it is never used, we
+ * will not save/restore it, so clearing it in this case would
+ * corrupt the state of the caller.
+ */
+ if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
+ (ctx->flags & SEEN_A))
emit_jit_reg_move(r_A, r_zero, ctx);
}
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
index 5d2e0c8d29c0..88a2075305d1 100644
--- a/arch/mips/net/bpf_jit_asm.S
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -90,18 +90,14 @@ FEXPORT(sk_load_half_positive)
is_offset_in_header(2, half)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
- .set reorder
- lh $r_A, 0(t1)
- .set noreorder
+ lhu $r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
- wsbh t0, $r_A
- seh $r_A, t0
+ wsbh $r_A, $r_A
# else
- sll t0, $r_A, 24
- andi t1, $r_A, 0xff00
- sra t0, t0, 16
- srl t1, t1, 8
+ sll t0, $r_A, 8
+ srl t1, $r_A, 8
+ andi t0, t0, 0xff00
or $r_A, t0, t1
# endif
#endif
@@ -115,7 +111,7 @@ FEXPORT(sk_load_byte_positive)
is_offset_in_header(1, byte)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
- lb $r_A, 0(t1)
+ lbu $r_A, 0(t1)
jr $r_ra
move $r_ret, zero
END(sk_load_byte)
@@ -139,6 +135,11 @@ FEXPORT(sk_load_byte_positive)
* (void *to) is returned in r_s0
*
*/
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define DS_OFFSET(SIZE) (4 * SZREG)
+#else
+#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
+#endif
#define bpf_slow_path_common(SIZE) \
/* Quick check. Are we within reasonable boundaries? */ \
LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
@@ -150,7 +151,7 @@ FEXPORT(sk_load_byte_positive)
PTR_LA t0, skb_copy_bits; \
PTR_S $r_ra, (5 * SZREG)($r_sp); \
/* Assign low slot to a2 */ \
- move a2, $r_sp; \
+ PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
jalr t0; \
/* Reset our destination slot (DS but it's ok) */ \
INT_S zero, (4 * SZREG)($r_sp); \
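Two things change in sk_load_half above: the load becomes lhu so the halfword is zero-extended instead of sign-extended, and the pre-R2 little-endian fallback shrinks to a plain 16-bit byte swap. The value computed by the new sll/srl/andi/or sequence is simply the following (illustrative C, not the JIT helper itself):

#include <stdint.h>
#include <stdio.h>

/* 16-bit byte swap: the halfword was loaded with lhu, so the upper
 * 16 bits of x are already zero. */
static uint32_t swap16(uint32_t x)
{
        return ((x << 8) & 0xff00) | (x >> 8);
}

int main(void)
{
        printf("0x%04x -> 0x%04x\n", 0x1234, swap16(0x1234));   /* 0x3412 */
        return 0;
}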
diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
index 64543d66e76b..e9531fea23a2 100644
--- a/arch/mips/ralink/reset.c
+++ b/arch/mips/ralink/reset.c
@@ -96,16 +96,9 @@ static void ralink_restart(char *command)
unreachable();
}
-static void ralink_halt(void)
-{
- local_irq_disable();
- unreachable();
-}
-
static int __init mips_reboot_setup(void)
{
_machine_restart = ralink_restart;
- _machine_halt = ralink_halt;
return 0;
}
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 1d8c24dc04d4..88290d32b956 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -25,6 +25,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_range(unsigned long, unsigned long);
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index ac8bd586ace8..06a1a883c72f 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -32,22 +32,12 @@ _futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
}
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
unsigned long int flags;
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval, ret;
u32 tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
- return -EFAULT;
-
_futex_spin_lock_irqsave(uaddr, &flags);
pagefault_disable();
@@ -85,17 +75,9 @@ out_pagefault_enable:
pagefault_enable();
_futex_spin_unlock_irqrestore(uaddr, &flags);
- if (ret == 0) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index df757c9675e6..8a0822125f8b 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -464,10 +464,10 @@ EXPORT_SYMBOL(copy_user_page);
int __flush_tlb_range(unsigned long sid, unsigned long start,
unsigned long end)
{
- unsigned long flags, size;
+ unsigned long flags;
- size = (end - start);
- if (size >= parisc_tlb_flush_threshold) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_tlb_flush_threshold) {
flush_tlb_all();
return 1;
}
@@ -538,13 +538,12 @@ void flush_cache_mm(struct mm_struct *mm)
struct vm_area_struct *vma;
pgd_t *pgd;
- /* Flush the TLB to avoid speculation if coherency is required. */
- if (parisc_requires_coherency())
- flush_tlb_all();
-
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
- if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ if (mm->context)
+ flush_tlb_all();
flush_cache_all();
return;
}
@@ -552,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm)
if (mm->context == mfsp(3)) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
- if ((vma->vm_flags & VM_EXEC) == 0)
- continue;
- flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}
return;
}
@@ -572,6 +571,8 @@ void flush_cache_mm(struct mm_struct *mm)
pfn = pte_pfn(*ptep);
if (!pfn_valid(pfn))
continue;
+ if (unlikely(mm->context))
+ flush_tlb_page(vma, addr);
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
@@ -598,30 +599,45 @@ flush_user_icache_range(unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- BUG_ON(!vma->vm_mm->context);
-
- /* Flush the TLB to avoid speculation if coherency is required. */
- if (parisc_requires_coherency())
- flush_tlb_range(vma, start, end);
+ pgd_t *pgd;
+ unsigned long addr;
- if ((end - start) >= parisc_cache_flush_threshold
- || vma->vm_mm->context != mfsp(3)) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_cache_flush_threshold) {
+ if (vma->vm_mm->context)
+ flush_tlb_range(vma, start, end);
flush_cache_all();
return;
}
- flush_user_dcache_range_asm(start, end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(start, end);
+ if (vma->vm_mm->context == mfsp(3)) {
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+ flush_tlb_range(vma, start, end);
+ return;
+ }
+
+ pgd = vma->vm_mm->pgd;
+ for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (pfn_valid(pfn)) {
+ if (unlikely(vma->vm_mm->context))
+ flush_tlb_page(vma, addr);
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
+ }
}
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
- BUG_ON(!vma->vm_mm->context);
-
if (pfn_valid(pfn)) {
- if (parisc_requires_coherency())
+ if (likely(vma->vm_mm->context))
flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
@@ -630,21 +646,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;
- if ((unsigned long)size > parisc_cache_flush_threshold)
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ (unsigned long)size >= parisc_cache_flush_threshold) {
+ flush_tlb_kernel_range(start, end);
flush_data_cache();
- else
- flush_kernel_dcache_range_asm(start, start + size);
+ return;
+ }
+
+ flush_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
void invalidate_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;
- if ((unsigned long)size > parisc_cache_flush_threshold)
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ (unsigned long)size >= parisc_cache_flush_threshold) {
+ flush_tlb_kernel_range(start, end);
flush_data_cache();
- else
- flush_kernel_dcache_range_asm(start, start + size);
+ return;
+ }
+
+ purge_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 700e2d2da096..2e68ca1fe0db 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -648,6 +648,10 @@ static int match_pci_device(struct device *dev, int index,
(modpath->mod == PCI_FUNC(devfn)));
}
+ /* index might be out of bounds for bc[] */
+ if (index >= 6)
+ return 0;
+
id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
return (modpath->bc[index] == id);
}
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 2d40c4ff3f69..67b0f7532e83 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
.procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)
+ENTRY_CFI(purge_kernel_dcache_range_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: cmpb,COND(<<),n %r26, %r25,1b
+ pdc,m %r23(%r26)
+
+ sync
+ syncdma
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
ENTRY_CFI(flush_user_icache_range_asm)
.proc
.callinfo NO_CALLS
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index c0deafc212b8..798ab37c9930 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,8 @@
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#ifdef __SUBARCH_HAS_LWSYNC
+/* The sub-arch has lwsync */
+#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
# define SMPWMB LWSYNC
#else
# define SMPWMB eieio
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 2015b072422c..b4ab1f497335 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -30,6 +30,7 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags);
int patch_instruction(unsigned int *addr, unsigned int instr);
int instr_is_relative_branch(unsigned int instr);
+int instr_is_relative_link_branch(unsigned int instr);
int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
unsigned long branch_target(const unsigned int *instr);
unsigned int translate_branch(const unsigned int *dest,
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index ab68d0ee7725..4e54282c29b4 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -474,7 +474,8 @@ enum {
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300)
-#define CPU_FTRS_POWER9_DD1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1)
+#define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
+ (~CPU_FTR_SAO))
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 2a9cf845473b..f4c7467f7465 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -31,18 +31,10 @@
: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
: "cr0", "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -68,17 +60,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index cd4ffd86765f..bb9073a2b2ae 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -14,6 +14,10 @@
#include <asm-generic/module.h>
+#ifdef CC_USING_MPROFILE_KERNEL
+#define MODULE_ARCH_VERMAGIC "mprofile-kernel"
+#endif
+
#ifndef __powerpc64__
/*
* Thanks to Paul M for explaining this.
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index e958b7096f19..9e5e0d910b91 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -21,6 +21,9 @@
/* We calculate number of sg entries based on PAGE_SIZE */
#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
+#define OPAL_BUSY_DELAY_MS 10
+
/* /sys/firmware/opal */
extern struct kobject *opal_kobj;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 56398e7e6100..71c69883125a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -132,7 +132,19 @@ extern long long virt_phys_offset;
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * On hash the vmalloc and other regions alias to the kernel region when passed
+ * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
+ * return true for some vmalloc addresses, which is incorrect. So explicitly
+ * check that the address is in the kernel region.
+ */
+#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
+ pfn_valid(virt_to_pfn(kaddr)))
+#else
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+#endif
/*
* On Book-E parts we need __va to parse the device tree and we can't
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 78efe8d5d775..30f2d6d4c640 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -5,10 +5,6 @@
#include <linux/stringify.h>
#include <asm/feature-fixups.h>
-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
-#define __SUBARCH_HAS_LWSYNC
-#endif
-
#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 6ef8f0bceacd..27843665da9e 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -207,18 +207,18 @@ static void *eeh_report_error(void *data, void *userdata)
if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
return NULL;
+
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_frozen;
driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
+ if (!driver) goto out_no_dev;
eeh_disable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->error_detected) {
- eeh_pcid_put(dev);
- return NULL;
- }
+ !driver->err_handler->error_detected)
+ goto out;
rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
@@ -227,7 +227,10 @@ static void *eeh_report_error(void *data, void *userdata)
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
edev->in_error = true;
+out:
eeh_pcid_put(dev);
+out_no_dev:
+ device_unlock(&dev->dev);
return NULL;
}
@@ -250,15 +253,14 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
return NULL;
+ device_lock(&dev->dev);
driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
+ if (!driver) goto out_no_dev;
if (!driver->err_handler ||
!driver->err_handler->mmio_enabled ||
- (edev->mode & EEH_DEV_NO_HANDLER)) {
- eeh_pcid_put(dev);
- return NULL;
- }
+ (edev->mode & EEH_DEV_NO_HANDLER))
+ goto out;
rc = driver->err_handler->mmio_enabled(dev);
@@ -266,7 +268,10 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+out:
eeh_pcid_put(dev);
+out_no_dev:
+ device_unlock(&dev->dev);
return NULL;
}
@@ -289,20 +294,20 @@ static void *eeh_report_reset(void *data, void *userdata)
if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
return NULL;
+
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_normal;
driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
+ if (!driver) goto out_no_dev;
eeh_enable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->slot_reset ||
(edev->mode & EEH_DEV_NO_HANDLER) ||
- (!edev->in_error)) {
- eeh_pcid_put(dev);
- return NULL;
- }
+ (!edev->in_error))
+ goto out;
rc = driver->err_handler->slot_reset(dev);
if ((*res == PCI_ERS_RESULT_NONE) ||
@@ -310,7 +315,10 @@ static void *eeh_report_reset(void *data, void *userdata)
if (*res == PCI_ERS_RESULT_DISCONNECT &&
rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+out:
eeh_pcid_put(dev);
+out_no_dev:
+ device_unlock(&dev->dev);
return NULL;
}
@@ -361,10 +369,12 @@ static void *eeh_report_resume(void *data, void *userdata)
if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
return NULL;
+
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_normal;
driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
+ if (!driver) goto out_no_dev;
was_in_error = edev->in_error;
edev->in_error = false;
@@ -374,13 +384,15 @@ static void *eeh_report_resume(void *data, void *userdata)
!driver->err_handler->resume ||
(edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
edev->mode &= ~EEH_DEV_NO_HANDLER;
- eeh_pcid_put(dev);
- return NULL;
+ goto out;
}
driver->err_handler->resume(dev);
+out:
eeh_pcid_put(dev);
+out_no_dev:
+ device_unlock(&dev->dev);
return NULL;
}
@@ -400,22 +412,25 @@ static void *eeh_report_failure(void *data, void *userdata)
if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
return NULL;
+
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_perm_failure;
driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
+ if (!driver) goto out_no_dev;
eeh_disable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->error_detected) {
- eeh_pcid_put(dev);
- return NULL;
- }
+ !driver->err_handler->error_detected)
+ goto out;
driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+out:
eeh_pcid_put(dev);
+out_no_dev:
+ device_unlock(&dev->dev);
return NULL;
}
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index de7d091c4c31..1abd8dd77ec1 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -795,7 +795,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/* PCI Command: 0x4 */
- eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
+ eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
/* Check the PCIe link is ready */
eeh_bridge_check_link(edev);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 7614d1dd2c0b..94b5dfb087e9 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -723,7 +723,7 @@ EXC_COMMON_BEGIN(bad_addr_slb)
ld r3, PACA_EXSLB+EX_DAR(r13)
std r3, _DAR(r1)
beq cr6, 2f
- li r10, 0x480 /* fix trap number for I-SLB miss */
+ li r10, 0x481 /* fix trap number for I-SLB miss */
std r10, _TRAP(r1)
2: bl save_nvgprs
addi r3, r1, STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index a75e2dd3e71f..f17bfa2a5318 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -372,6 +372,14 @@ void force_external_irq_replay(void)
*/
WARN_ON(!arch_irqs_disabled());
+ /*
+ * Interrupts must always be hard disabled before irq_happened is
+ * modified (to prevent lost update in case of interrupt between
+ * load and store).
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
/* Indicate in the PACA that we have an interrupt to replay */
local_paca->irq_happened |= PACA_IRQ_EE;
}
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 183368e008cf..99407cf12ad5 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -494,7 +494,17 @@ static bool is_early_mcount_callsite(u32 *instruction)
restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
- if (is_early_mcount_callsite(instruction - 1))
+ u32 *prev_insn = instruction - 1;
+
+ if (is_early_mcount_callsite(prev_insn))
+ return 1;
+
+ /*
+ * Make sure the branch isn't a sibling call. Sibling calls aren't
+ * "link" branches and they don't return, so they don't need the r2
+ * restore afterwards.
+ */
+ if (!instr_is_relative_link_branch(*prev_insn))
return 1;
if (*instruction != PPC_INST_NOP) {
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f1d7e996e673..ab7b661b6da3 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -719,12 +719,20 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+ unsigned int tcr;
+
/* Clear any pending timer interrupts */
mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
- /* Enable decrementer interrupt */
- mtspr(SPRN_TCR, TCR_DIE);
-#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
+ tcr = mfspr(SPRN_TCR);
+ /*
+ * The watchdog may have already been enabled by u-boot. So leave
+ * TRC[WP] (Watchdog Period) alone.
+ */
+ tcr &= TCR_WP_MASK; /* Clear all bits except for TCR[WP] */
+ tcr |= TCR_DIE; /* Enable decrementer */
+ mtspr(SPRN_TCR, tcr);
+#endif
}
void __init generic_calibrate_decr(void)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index a587e8f4fd26..4b4e927c4822 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -177,12 +177,15 @@ map_again:
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
hpsize, hpsize, MMU_SEGSIZE_256M);
- if (ret < 0) {
+ if (ret == -1) {
/* If we couldn't map a primary PTE, try a secondary */
hash = ~hash;
vflags ^= HPTE_V_SECONDARY;
attempt++;
goto map_again;
+ } else if (ret < 0) {
+ r = -EIO;
+ goto out_unlock;
} else {
trace_kvm_book3s_64_mmu_map(rflags, hpteg,
vpn, hpaddr, orig_pte);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 55fbc0c78721..79a180cf4c94 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -299,7 +299,6 @@ kvm_novcpu_exit:
stw r12, STACK_SLOT_TRAP(r1)
bl kvmhv_commence_exit
nop
- lwz r12, STACK_SLOT_TRAP(r1)
b kvmhv_switch_to_host
/*
@@ -1023,6 +1022,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
secondary_too_late:
li r12, 0
+ stw r12, STACK_SLOT_TRAP(r1)
cmpdi r4, 0
beq 11f
stw r12, VCPU_TRAP(r4)
@@ -1266,12 +1266,12 @@ mc_cont:
bl kvmhv_accumulate_time
#endif
+ stw r12, STACK_SLOT_TRAP(r1)
mr r3, r12
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
nop
ld r9, HSTATE_KVM_VCPU(r13)
- lwz r12, VCPU_TRAP(r9)
/* Stop others sending VCPU interrupts to this physical CPU */
li r0, -1
@@ -1549,6 +1549,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
* have to coordinate the hardware threads.
+ * Here STACK_SLOT_TRAP(r1) contains the trap number.
*/
kvmhv_switch_to_host:
/* Secondary threads wait for primary to do partition switch */
@@ -1599,11 +1600,11 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* If HMI, call kvmppc_realmode_hmi_handler() */
+ lwz r12, STACK_SLOT_TRAP(r1)
cmpwi r12, BOOK3S_INTERRUPT_HMI
bne 27f
bl kvmppc_realmode_hmi_handler
nop
- li r12, BOOK3S_INTERRUPT_HMI
/*
* At this point kvmppc_realmode_hmi_handler would have resync-ed
* the TB. Hence it is not required to subtract guest timebase
@@ -1678,6 +1679,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
+ lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
ld r0, SFS+PPC_LR_STKOFF(r1)
addi r1, r1, SFS
mtlr r0
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 826c541a12af..e0d88d0890aa 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -627,7 +627,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_mmu_unmap_page(vcpu, &pte);
}
/* The guest's PTE is not mapped yet. Map on the host */
- kvmppc_mmu_map_page(vcpu, &pte, iswrite);
+ if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
+ /* Exit KVM if mapping failed */
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
if (data)
vcpu->stat.sp_storage++;
else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 02176fd52f84..286a3b051ff6 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
pteg_addr = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
+ ret = H_FUNCTION;
+ if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
+ goto done;
hpte = pteg;
ret = H_PTEG_FULL;
@@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
pteg_addr += i * HPTE_SIZE;
- copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
+ goto done;
kvmppc_set_gpr(vcpu, 4, pte_index | i);
ret = H_SUCCESS;
@@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+ goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
goto done;
- copy_to_user((void __user *)pteg, &v, sizeof(v));
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
+ goto done;
rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
@@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
}
pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
+ ret = H_FUNCTION;
+ break;
+ }
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
tsh |= H_BULK_REMOVE_NOT_FOUND;
} else {
/* Splat the pteg in (userland) hpt */
- copy_to_user((void __user *)pteg, &v, sizeof(v));
+ if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
+ ret = H_FUNCTION;
+ break;
+ }
rb = compute_tlbie_rb(pte[0], pte[1],
tsh & H_BULK_REMOVE_PTEX);
@@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+ goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
pte[0] = (__force u64)cpu_to_be64(pte[0]);
pte[1] = (__force u64)cpu_to_be64(pte[1]);
- copy_to_user((void __user *)pteg, pte, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
+ goto done;
ret = H_SUCCESS;
done:
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index d5edbeb8eb82..753d591f1b52 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -95,6 +95,11 @@ int instr_is_relative_branch(unsigned int instr)
return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}
+int instr_is_relative_link_branch(unsigned int instr)
+{
+ return instr_is_relative_branch(instr) && (instr & BRANCH_SET_LINK);
+}
+
static unsigned long branch_iform_target(const unsigned int *instr)
{
signed long imm;
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index e86bfa111f3c..46c8338a61bc 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
unsigned int *target = (unsigned int *)branch_target(src);
/* Branch within the section doesn't need translating */
- if (target < alt_start || target >= alt_end) {
+ if (target < alt_start || target > alt_end) {
instr = translate_branch(dest, src);
if (!instr)
return 1;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d0b137d96df1..9376e8e53bfa 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -294,7 +294,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* can result in fault, which will cause a deadlock when called with
* mmap_sem held
*/
- if (user_mode(regs))
+ if (!is_exec && user_mode(regs))
store_update_sp = store_updates_sp(regs);
if (user_mode(regs))
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a5d3ecdabc44..035dfb65df4b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -765,6 +765,24 @@ static int __init add_huge_page_size(unsigned long long size)
if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
return -EINVAL;
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * We need to make sure that for different page sizes reported by
+ * firmware we only add hugetlb support for page sizes that can be
+ * supported by linux page table layout.
+ * For now we have
+ * Radix: 2M
+ * Hash: 16M and 16G
+ */
+ if (radix_enabled()) {
+ if (mmu_psize != MMU_PAGE_2M)
+ return -EINVAL;
+ } else {
+ if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
+ return -EINVAL;
+ }
+#endif
+
BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
/* Return if huge page size has already been setup */
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 050badc0ebd3..0b50019505a5 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -751,7 +751,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
* avoid going over total available memory just in case...
*/
#ifdef CONFIG_PPC_FSL_BOOK3E
- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+ if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned long linear_sz;
unsigned int num_cams;
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 0fe98a567125..be9d968244ad 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -245,6 +245,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* goto out;
*/
PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+ PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
PPC_BCC(COND_GE, out);
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 85c85eb3e245..b4abf9d5d9e1 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
if (!dump_skip(cprm, skip))
goto Eio;
+
+ rc = 0;
out:
free_page((unsigned long)buf);
return rc;
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index 9db4398ded5d..1bceb95f422d 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -11,6 +11,7 @@
#define DEBUG
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
@@ -56,9 +57,17 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_write_nvram(__pa(buf), count, off);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
+ } else if (rc == OPAL_BUSY) {
+ msleep(OPAL_BUSY_DELAY_MS);
+ }
}
+
+ if (rc)
+ return -EIO;
+
*index += count;
return count;
}
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index f8868864f373..aa2a5139462e 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ mdelay(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
- else if (rc == OPAL_BUSY)
- mdelay(10);
+ } else if (rc == OPAL_BUSY) {
+ mdelay(OPAL_BUSY_DELAY_MS);
+ }
}
if (rc != OPAL_SUCCESS)
return 0;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 3e828b20c21e..2842f9d63d21 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -79,7 +79,7 @@ unsigned int mpc8xx_get_irq(void)
irq = in_be32(&siu_reg->sc_sivec) >> 26;
if (irq == PIC_VEC_SPURRIOUS)
- irq = 0;
+ return 0;
return irq_linear_revmap(mpc8xx_pic_host, irq);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9aa0d04c9dcc..1c4a595e8224 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -118,6 +118,7 @@ config S390
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_DEVICES if !SMP
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_FIND_FIRST_BIT
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
@@ -704,6 +705,51 @@ config SECCOMP
If unsure, say Y.
+config KERNEL_NOBP
+ def_bool n
+ prompt "Enable modified branch prediction for the kernel by default"
+ help
+ If this option is selected the kernel will switch to a modified
+ branch prediction mode if the firmware interface is available.
+ The modified branch prediction mode improves the behaviour in
+ regard to speculative execution.
+
+ With the option enabled the kernel parameter "nobp=0" or "nospec"
+ can be used to run the kernel in the normal branch prediction mode.
+
+ With the option disabled the modified branch prediction mode is
+ enabled with the "nobp=1" kernel parameter.
+
+ If unsure, say N.
+
+config EXPOLINE
+ def_bool n
+ prompt "Avoid speculative indirect branches in the kernel"
+ help
+ Compile the kernel with the expoline compiler options to guard
+ against kernel-to-user data leaks by avoiding speculative indirect
+ branches.
+ Requires a compiler with -mindirect-branch=thunk support for full
+ protection. The kernel may run slower.
+
+ If unsure, say N.
+
+choice
+ prompt "Expoline default"
+ depends on EXPOLINE
+ default EXPOLINE_FULL
+
+config EXPOLINE_OFF
+ bool "spectre_v2=off"
+
+config EXPOLINE_AUTO
+ bool "spectre_v2=auto"
+
+config EXPOLINE_FULL
+ bool "spectre_v2=on"
+
+endchoice
+
endmenu
menu "Power Management"
@@ -753,6 +799,7 @@ config PFAULT
config SHARED_KERNEL
bool "VM shared kernel support"
depends on !JUMP_LABEL
+ depends on !ALTERNATIVES
help
Select this option, if you want to share the text segment of the
Linux kernel between different VM guests. This reduces memory
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 54e00526b8df..bef67c0f63e2 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -79,6 +79,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
+ifdef CONFIG_EXPOLINE
+ ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
+ CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
+ CC_FLAGS_EXPOLINE += -mfunction-return=thunk
+ CC_FLAGS_EXPOLINE += -mindirect-branch-table
+ export CC_FLAGS_EXPOLINE
+ cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+ endif
+endif
+
ifdef CONFIG_FUNCTION_TRACER
# make use of hotpatch feature if the compiler supports it
cc_hotpatch := -mhotpatch=0,3
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 09bccb224d03..2a17123130d3 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -318,7 +318,7 @@ static void hypfs_kill_super(struct super_block *sb)
if (sb->s_root)
hypfs_delete_tree(sb->s_root);
- if (sb_info->update_file)
+ if (sb_info && sb_info->update_file)
hypfs_remove(sb_info->update_file);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
new file mode 100644
index 000000000000..a72002056b54
--- /dev/null
+++ b/arch/s390/include/asm/alternative.h
@@ -0,0 +1,149 @@
+#ifndef _ASM_S390_ALTERNATIVE_H
+#define _ASM_S390_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+ s32 instr_offset; /* original instruction */
+ s32 repl_offset; /* offset to replacement instruction */
+ u16 facility; /* facility bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction */
+} __packed;
+
+void apply_alternative_instructions(void);
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+/*
+ * |661: |662: |6620 |663:
+ * +-----------+---------------------+
+ * | oldinstr | oldinstr_padding |
+ * | +----------+----------+
+ * | | | |
+ * | | >6 bytes |6/4/2 nops|
+ * | |6 bytes jg----------->
+ * +-----------+---------------------+
+ * ^^ static padding ^^
+ *
+ * .altinstr_replacement section
+ * +---------------------+-----------+
+ * |6641: |6651:
+ * | alternative instr 1 |
+ * +-----------+---------+- - - - - -+
+ * |6642: |6652: |
+ * | alternative instr 2 | padding
+ * +---------------------+- - - - - -+
+ * ^ runtime ^
+ *
+ * .altinstructions section
+ * +---------------------------------+
+ * | alt_instr entries for each |
+ * | alternative instr |
+ * +---------------------------------+
+ */
+
+#define b_altinstr(num) "664"#num
+#define e_altinstr(num) "665"#num
+
+#define e_oldinstr_pad_end "663"
+#define oldinstr_len "662b-661b"
+#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
+#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
+#define oldinstr_pad_len(num) \
+ "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
+ "((" altinstr_len(num) ")-(" oldinstr_len "))"
+
+#define INSTR_LEN_SANITY_CHECK(len) \
+ ".if " len " > 254\n" \
+ "\t.error \"cpu alternatives does not support instructions " \
+ "blocks > 254 bytes\"\n" \
+ ".endif\n" \
+ ".if (" len ") %% 2\n" \
+ "\t.error \"cpu alternatives instructions length is odd\"\n" \
+ ".endif\n"
+
+#define OLDINSTR_PADDING(oldinstr, num) \
+ ".if " oldinstr_pad_len(num) " > 6\n" \
+ "\tjg " e_oldinstr_pad_end "f\n" \
+ "6620:\n" \
+ "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+ ".else\n" \
+ "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
+ ".endif\n"
+
+#define OLDINSTR(oldinstr, num) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ OLDINSTR_PADDING(oldinstr, num) \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR_2(oldinstr, num1, num2) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
+ OLDINSTR_PADDING(oldinstr, num2) \
+ ".else\n" \
+ OLDINSTR_PADDING(oldinstr, num1) \
+ ".endif\n" \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define ALTINSTR_ENTRY(facility, num) \
+ "\t.long 661b - .\n" /* old instruction */ \
+ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
+ "\t.word " __stringify(facility) "\n" /* facility bit */ \
+ "\t.byte " oldinstr_total_len "\n" /* source len */ \
+ "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
+
+#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
+ b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
+ INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr, 1) \
+ ".popsection\n" \
+ OLDINSTR(oldinstr, 1) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility, 1) \
+ ".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr1, 1) \
+ ALTINSTR_REPLACEMENT(altinstr2, 2) \
+ ".popsection\n" \
+ OLDINSTR_2(oldinstr, 1, 2) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility1, 1) \
+ ALTINSTR_ENTRY(facility2, 2) \
+ ".popsection\n"
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows to use optimized instructions even on generic binary
+ * kernels.
+ *
+ * oldinstr is padded with jump and nops at compile time if altinstr is
+ * longer. altinstr is padded with jump and nops at run-time during patching.
+ *
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, altinstr, facility) \
+ asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+
+#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+ asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
+ altinstr2, facility2) ::: "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_H */
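For orientation, a minimal usage sketch of the alternative() macro defined above (illustrative only, not part of the patch; the opcodes mirror the BPON/BPOFF sequences added to entry.S further down, and the wrapper name is hypothetical):

#include <asm/alternative.h>

/* Hypothetical wrapper, for illustration: execute a 4-byte nop unless
 * facility bit 82 is present in the alternate facility list, in which
 * case the patched-in "branch prediction off" instruction (0xb2e8c000,
 * as used by the BPOFF macro in entry.S) runs instead. */
static inline void bp_off_example(void)
{
	alternative(".long 0x47000000", ".long 0xb2e8c000", 82);
}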
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 5c8db3ce61c8..03b2e5bf1206 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -48,6 +48,30 @@ do { \
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
+/**
+ * array_index_mask_nospec - generate a mask for array_idx() that is
+ * ~0UL when the bounds check succeeds and 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ unsigned long mask;
+
+ if (__builtin_constant_p(size) && size > 0) {
+ asm(" clgr %2,%1\n"
+ " slbgr %0,%0\n"
+ :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
+ return mask;
+ }
+ asm(" clgr %1,%2\n"
+ " slbgr %0,%0\n"
+ :"=d" (mask) : "d" (size), "d" (index) :"cc");
+ return ~mask;
+}
+
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
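A short usage sketch for the new array_index_mask_nospec() (illustrative only; the caller and table are hypothetical, and the mask is normally consumed through the generic array_index_nospec() helper):

#include <linux/nospec.h>
#include <linux/types.h>

/* Hypothetical lookup, for illustration: clamp a user-controlled index
 * so that a mispredicted bounds check cannot speculatively read beyond
 * the end of the table. */
static u32 table_lookup(const u32 *table, unsigned long nr_entries,
			unsigned long idx)
{
	if (idx >= nr_entries)
		return 0;
	idx = array_index_nospec(idx, nr_entries);
	return table[idx];
}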
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 09b406db7529..5811e7849a2e 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -15,7 +15,25 @@
#include <linux/preempt.h>
#include <asm/lowcore.h>
-#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
+#define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8)
+
+static inline void __set_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr = (unsigned char *) facilities;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return;
+ ptr[nr >> 3] |= 0x80 >> (nr & 7);
+}
+
+static inline void __clear_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr = (unsigned char *) facilities;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return;
+ ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
+}
static inline int __test_facility(unsigned long nr, void *facilities)
{
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index a4811aa0304d..8f8eec9e1198 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -21,17 +21,12 @@
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc");
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, newval, ret;
load_kernel_asce();
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
pagefault_disable();
switch (op) {
@@ -60,17 +55,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
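This s390 hunk (like the alpha/arc/arm/powerpc futex hunks elsewhere in this merge) follows one refactoring: the per-architecture helper now only performs the atomic operation and reports the old value through *oval, while decoding of the encoded op and the single FUTEX_OP_CMP_* comparison move into common code. A simplified sketch of the shape of that common caller (the exact kernel/futex.c code in this tree may differ in detail):

#include <linux/futex.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op = (encoded_op >> 28) & 7;
	unsigned int cmp = (encoded_op >> 24) & 15;
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	/* The architecture does the atomic op and reports the old value. */
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	/* The comparison that each arch used to duplicate happens once here. */
	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval <  cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval >  cmparg;
	default:              return -ENOSYS;
	}
}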
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a41faf34b034..5792590d0e7c 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -181,7 +181,8 @@ struct kvm_s390_sie_block {
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
- __u8 reserved60; /* 0x0060 */
+#define FPF_BPBC 0x20
+ __u8 fpf; /* 0x0060 */
__u8 ecb; /* 0x0061 */
__u8 ecb2; /* 0x0062 */
#define ECB3_AES 0x04
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 7b93b78f423c..ad4e0cee1557 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -135,7 +135,9 @@ struct lowcore {
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
- __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
+ /* br %r1 trampoline */
+ __u16 br_r1_trampoline; /* 0x0440 */
+ __u8 pad_0x0442[0x0e00-0x0442]; /* 0x0442 */
/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -150,7 +152,8 @@ struct lowcore {
__u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
/* Extended facility list */
- __u64 stfle_fac_list[32]; /* 0x0f00 */
+ __u64 stfle_fac_list[16]; /* 0x0f00 */
+ __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
__u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
/* Pointer to vector register save area */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
new file mode 100644
index 000000000000..b4bd8c41e9d3
--- /dev/null
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_EXPOLINE_H
+#define _ASM_S390_EXPOLINE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+extern int nospec_disable;
+
+void nospec_init_branches(void);
+void nospec_auto_detect(void);
+void nospec_revert(s32 *start, s32 *end);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6bcbbece082b..d5842126ec70 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -84,6 +84,7 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);
+extern void __bpon(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -359,6 +360,9 @@ extern void memcpy_absolute(void *, void *, size_t);
memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
}
+extern int s390_isolate_bp(void);
+extern int s390_isolate_bp_guest(void);
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f15c0398c363..84f2ae44b4e9 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -79,6 +79,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define TIF_SECCOMP 5 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_UPROBE 7 /* breakpointed or single-stepping */
+#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
+#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_31BIT 16 /* 32bit process */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
@@ -94,6 +96,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
#define _TIF_UPROBE _BITUL(TIF_UPROBE)
+#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
+#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
#define _TIF_31BIT _BITUL(TIF_31BIT)
#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index a2ffec4139ad..81c02e198527 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -197,6 +197,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_VRS (1UL << 6)
#define KVM_SYNC_RICCB (1UL << 7)
#define KVM_SYNC_FPRS (1UL << 8)
+#define KVM_SYNC_BPBC (1UL << 10)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -217,7 +218,9 @@ struct kvm_sync_regs {
};
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
- __u8 padding[52]; /* riccb needs to be 64byte aligned */
+ __u8 bpbc : 1; /* bp mode */
+ __u8 reserved2 : 7;
+ __u8 padding1[51]; /* riccb needs to be 64byte aligned */
__u8 riccb[64]; /* runtime instrumentation controls block */
};
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 1f0fe98f6db9..0501cac2ab95 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -42,6 +42,7 @@ ifneq ($(CC_FLAGS_MARCH),-march=z900)
CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp.o += -march=z900
CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE)
CFLAGS_als.o += -march=z900
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
@@ -57,10 +58,13 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
-obj-y += entry.o reipl.o relocate_kernel.o
+obj-y += entry.o reipl.o relocate_kernel.o alternative.o
+obj-y += nospec-branch.o
extra-y += head.o head64.o vmlinux.lds
+CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
+
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
new file mode 100644
index 000000000000..b57b293998dc
--- /dev/null
+++ b/arch/s390/kernel/alternative.c
@@ -0,0 +1,112 @@
+#include <linux/module.h>
+#include <asm/alternative.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+#define MAX_PATCH_LEN (255 - 1)
+
+static int __initdata_or_module alt_instr_disabled;
+
+static int __init disable_alternative_instructions(char *str)
+{
+ alt_instr_disabled = 1;
+ return 0;
+}
+
+early_param("noaltinstr", disable_alternative_instructions);
+
+struct brcl_insn {
+ u16 opc;
+ s32 disp;
+} __packed;
+
+static u16 __initdata_or_module nop16 = 0x0700;
+static u32 __initdata_or_module nop32 = 0x47000000;
+static struct brcl_insn __initdata_or_module nop48 = {
+ 0xc004, 0
+};
+
+static const void *nops[] __initdata_or_module = {
+ &nop16,
+ &nop32,
+ &nop48
+};
+
+static void __init_or_module add_jump_padding(void *insns, unsigned int len)
+{
+ struct brcl_insn brcl = {
+ 0xc0f4,
+ len / 2
+ };
+
+ memcpy(insns, &brcl, sizeof(brcl));
+ insns += sizeof(brcl);
+ len -= sizeof(brcl);
+
+ while (len > 0) {
+ memcpy(insns, &nop16, 2);
+ insns += 2;
+ len -= 2;
+ }
+}
+
+static void __init_or_module add_padding(void *insns, unsigned int len)
+{
+ if (len > 6)
+ add_jump_padding(insns, len);
+ else if (len >= 2)
+ memcpy(insns, nops[len / 2 - 1], len);
+}
+
+static void __init_or_module __apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ struct alt_instr *a;
+ u8 *instr, *replacement;
+ u8 insnbuf[MAX_PATCH_LEN];
+
+ /*
+ * The scan order should be from start to end. A later scanned
+ * alternative code can overwrite previously scanned alternative code.
+ */
+ for (a = start; a < end; a++) {
+ int insnbuf_sz = 0;
+
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
+
+ if (!__test_facility(a->facility,
+ S390_lowcore.alt_stfle_fac_list))
+ continue;
+
+ if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
+ WARN_ONCE(1, "cpu alternatives instructions length is "
+ "odd, skipping patching\n");
+ continue;
+ }
+
+ memcpy(insnbuf, replacement, a->replacementlen);
+ insnbuf_sz = a->replacementlen;
+
+ if (a->instrlen > a->replacementlen) {
+ add_padding(insnbuf + a->replacementlen,
+ a->instrlen - a->replacementlen);
+ insnbuf_sz += a->instrlen - a->replacementlen;
+ }
+
+ s390_kernel_write(instr, insnbuf, insnbuf_sz);
+ }
+}
+
+void __init_or_module apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ if (!alt_instr_disabled)
+ __apply_alternatives(start, end);
+}
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+void __init apply_alternative_instructions(void)
+{
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 29d87444a655..0c7a7d5d95f1 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -299,6 +299,11 @@ static noinline __init void setup_facility_list(void)
{
stfle(S390_lowcore.stfle_fac_list,
ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+ memcpy(S390_lowcore.alt_stfle_fac_list,
+ S390_lowcore.stfle_fac_list,
+ sizeof(S390_lowcore.alt_stfle_fac_list));
+ if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}
static __init void detect_diag9c(void)
@@ -372,7 +377,7 @@ static int __init topology_setup(char *str)
rc = kstrtobool(str, &enabled);
if (!rc && !enabled)
- S390_lowcore.machine_flags &= ~MACHINE_HAS_TOPOLOGY;
+ S390_lowcore.machine_flags &= ~MACHINE_FLAG_TOPOLOGY;
return rc;
}
early_param("topology", topology_setup);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3bc2825173ef..1996afeb2e81 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -105,6 +105,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
j 3f
1: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,\timer
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
2: lg %r15,__LC_ASYNC_STACK # load async stack
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
@@ -163,6 +164,130 @@ _PIF_WORK = (_PIF_PER_TRAP)
tm off+\addr, \mask
.endm
+ .macro BPOFF
+ .pushsection .altinstr_replacement, "ax"
+660: .long 0xb2e8c000
+ .popsection
+661: .long 0x47000000
+ .pushsection .altinstructions, "a"
+ .long 661b - .
+ .long 660b - .
+ .word 82
+ .byte 4
+ .byte 4
+ .popsection
+ .endm
+
+ .macro BPON
+ .pushsection .altinstr_replacement, "ax"
+662: .long 0xb2e8d000
+ .popsection
+663: .long 0x47000000
+ .pushsection .altinstructions, "a"
+ .long 663b - .
+ .long 662b - .
+ .word 82
+ .byte 4
+ .byte 4
+ .popsection
+ .endm
+
+ .macro BPENTER tif_ptr,tif_mask
+ .pushsection .altinstr_replacement, "ax"
+662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
+ .word 0xc004, 0x0000, 0x0000 # 6 byte nop
+ .popsection
+664: TSTMSK \tif_ptr,\tif_mask
+ jz . + 8
+ .long 0xb2e8d000
+ .pushsection .altinstructions, "a"
+ .long 664b - .
+ .long 662b - .
+ .word 82
+ .byte 12
+ .byte 12
+ .popsection
+ .endm
+
+ .macro BPEXIT tif_ptr,tif_mask
+ TSTMSK \tif_ptr,\tif_mask
+ .pushsection .altinstr_replacement, "ax"
+662: jnz . + 8
+ .long 0xb2e8d000
+ .popsection
+664: jz . + 8
+ .long 0xb2e8c000
+ .pushsection .altinstructions, "a"
+ .long 664b - .
+ .long 662b - .
+ .word 82
+ .byte 8
+ .byte 8
+ .popsection
+ .endm
+
+#ifdef CONFIG_EXPOLINE
+
+ .macro GEN_BR_THUNK name,reg,tmp
+ .section .text.\name,"axG",@progbits,\name,comdat
+ .globl \name
+ .hidden \name
+ .type \name,@function
+\name:
+ .cfi_startproc
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ exrl 0,0f
+#else
+ larl \tmp,0f
+ ex 0,0(\tmp)
+#endif
+ j .
+0: br \reg
+ .cfi_endproc
+ .endm
+
+ GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
+ GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
+ GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
+
+ .macro BASR_R14_R9
+0: brasl %r14,__s390x_indirect_jump_r1use_r9
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+ .macro BR_R1USE_R14
+0: jg __s390x_indirect_jump_r1use_r14
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+ .macro BR_R11USE_R14
+0: jg __s390x_indirect_jump_r11use_r14
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+#else /* CONFIG_EXPOLINE */
+
+ .macro BASR_R14_R9
+ basr %r14,%r9
+ .endm
+
+ .macro BR_R1USE_R14
+ br %r14
+ .endm
+
+ .macro BR_R11USE_R14
+ br %r14
+ .endm
+
+#endif /* CONFIG_EXPOLINE */
+
+
.section .kprobes.text, "ax"
.Ldummy:
/*
@@ -175,6 +300,11 @@ _PIF_WORK = (_PIF_PER_TRAP)
*/
nop 0
+ENTRY(__bpon)
+ .globl __bpon
+ BPON
+ BR_R1USE_R14
+
/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -201,9 +331,9 @@ ENTRY(__switch_to)
mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
- bzr %r14
+ jz 0f
.insn s,0xb2800000,__LC_LPP # set program parameter
- br %r14
+0: BR_R1USE_R14
.L__critical_start:
@@ -215,9 +345,11 @@ ENTRY(__switch_to)
*/
ENTRY(sie64a)
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
+ lg %r12,__LC_CURRENT
stg %r2,__SF_EMPTY(%r15) # save control block pointer
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
+ mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
jno .Lsie_load_guest_gprs
brasl %r14,load_fpu_regs # load guest fp/vx regs
@@ -234,7 +366,11 @@ ENTRY(sie64a)
jnz .Lsie_skip
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsie_skip # exit if fp/vx regs changed
+ BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
sie 0(%r14)
+.Lsie_exit:
+ BPOFF
+ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
@@ -255,9 +391,15 @@ ENTRY(sie64a)
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ xgr %r0,%r0 # clear guest registers to
+ xgr %r1,%r1 # prevent speculative use
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
- br %r14
+ BR_R1USE_R14
.Lsie_fault:
lghi %r14,-EFAULT
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
@@ -280,6 +422,7 @@ ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ BPOFF
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lghi %r14,_PIF_SYSCALL
@@ -289,12 +432,15 @@ ENTRY(system_call)
LAST_BREAK %r13
.Lsysc_vtime:
UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
+ # clear user controlled register to prevent speculative use
+ xgr %r0,%r0
lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
@@ -312,7 +458,7 @@ ENTRY(system_call)
lgf %r9,0(%r8,%r10) # get system call add.
TSTMSK __TI_flags(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
- basr %r14,%r9 # call sys_xxxx
+ BASR_R14_R9 # call sys_xxxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
@@ -324,6 +470,7 @@ ENTRY(system_call)
jnz .Lsysc_work # check for work
TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lsysc_work
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
@@ -451,7 +598,7 @@ ENTRY(system_call)
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lg %r2,__PT_ORIG_GPR2(%r11)
- basr %r14,%r9 # call sys_xxx
+ BASR_R14_R9 # call sys_xxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
TSTMSK __TI_flags(%r12),_TIF_TRACE
@@ -475,7 +622,7 @@ ENTRY(ret_from_fork)
lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
- basr %r14,%r9
+ BASR_R14_R9
j .Lsysc_tracenogo
/*
@@ -484,6 +631,7 @@ ENTRY(kernel_thread_starter)
ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -508,6 +656,7 @@ ENTRY(pgm_check_handler)
j 3f
2: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
lg %r14,__TI_task(%r12)
aghi %r14,__TASK_thread # pointer to thread_struct
@@ -517,6 +666,15 @@ ENTRY(pgm_check_handler)
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
@@ -538,9 +696,9 @@ ENTRY(pgm_check_handler)
nill %r10,0x007f
sll %r10,2
je .Lpgm_return
- lgf %r1,0(%r10,%r1) # load address of handler routine
+ lgf %r9,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # branch to interrupt-handler
+ BASR_R14_R9 # branch to interrupt-handler
.Lpgm_return:
LOCKDEP_SYS_EXIT
tm __PT_PSW+1(%r11),0x01 # returning to user ?
@@ -573,6 +731,7 @@ ENTRY(pgm_check_handler)
ENTRY(io_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -580,6 +739,16 @@ ENTRY(io_int_handler)
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
@@ -614,9 +783,13 @@ ENTRY(io_int_handler)
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jno .Lio_exit_kernel
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+.Lio_exit_kernel:
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
.Lio_done:
@@ -748,6 +921,7 @@ ENTRY(io_int_handler)
ENTRY(ext_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -755,6 +929,16 @@ ENTRY(ext_int_handler)
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
lghi %r1,__LC_EXT_PARAMS2
@@ -787,11 +971,12 @@ ENTRY(psw_idle)
.Lpsw_idle_stcctm:
#endif
oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
+ BPON
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
- br %r14
+ BR_R1USE_R14
.Lpsw_idle_end:
/*
@@ -805,7 +990,7 @@ ENTRY(save_fpu_regs)
lg %r2,__LC_CURRENT
aghi %r2,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bor %r14
+ jo .Lsave_fpu_regs_exit
stfpc __THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
lg %r3,__THREAD_FPU_regs(%r2)
@@ -835,7 +1020,8 @@ ENTRY(save_fpu_regs)
std 15,120(%r3)
.Lsave_fpu_regs_done:
oi __LC_CPU_FLAGS+7,_CIF_FPU
- br %r14
+.Lsave_fpu_regs_exit:
+ BR_R1USE_R14
.Lsave_fpu_regs_end:
#if IS_ENABLED(CONFIG_KVM)
EXPORT_SYMBOL(save_fpu_regs)
@@ -855,7 +1041,7 @@ load_fpu_regs:
lg %r4,__LC_CURRENT
aghi %r4,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bnor %r14
+ jno .Lload_fpu_regs_exit
lfpc __THREAD_FPU_fpc(%r4)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
@@ -884,7 +1070,8 @@ load_fpu_regs:
ld 15,120(%r4)
.Lload_fpu_regs_done:
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
- br %r14
+.Lload_fpu_regs_exit:
+ BR_R1USE_R14
.Lload_fpu_regs_end:
.L__critical_end:
@@ -894,6 +1081,7 @@ load_fpu_regs:
*/
ENTRY(mcck_int_handler)
STCK __LC_MCCK_CLOCK
+ BPOFF
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
@@ -925,6 +1113,16 @@ ENTRY(mcck_int_handler)
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
@@ -950,6 +1148,7 @@ ENTRY(mcck_int_handler)
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0: lmg %r11,%r15,__PT_R11(%r11)
@@ -1045,7 +1244,7 @@ cleanup_critical:
jl 0f
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
-0: br %r14
+0: BR_R11USE_R14
.align 8
.Lcleanup_table:
@@ -1070,11 +1269,12 @@ cleanup_critical:
.quad .Lsie_done
.Lcleanup_sie:
+ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
lg %r9,__SF_EMPTY(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
- br %r14
+ BR_R11USE_R14
#endif
.Lcleanup_system_call:
@@ -1116,7 +1316,8 @@ cleanup_critical:
srag %r9,%r9,23
jz 0f
mvc __TI_last_break(8,%r12),16(%r11)
-0: # set up saved register r11
+0: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ # set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
stg %r9,24(%r11) # r11 pt_regs pointer
@@ -1131,7 +1332,7 @@ cleanup_critical:
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,.Lsysc_do_svc
- br %r14
+ BR_R11USE_R14
.Lcleanup_system_call_insn:
.quad system_call
.quad .Lsysc_stmg
@@ -1141,7 +1342,7 @@ cleanup_critical:
.Lcleanup_sysc_tif:
larl %r9,.Lsysc_tif
- br %r14
+ BR_R11USE_R14
.Lcleanup_sysc_restore:
# check if stpt has been executed
@@ -1158,14 +1359,14 @@ cleanup_critical:
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
+ BR_R11USE_R14
.Lcleanup_sysc_restore_insn:
.quad .Lsysc_exit_timer
.quad .Lsysc_done - 4
.Lcleanup_io_tif:
larl %r9,.Lio_tif
- br %r14
+ BR_R11USE_R14
.Lcleanup_io_restore:
# check if stpt has been executed
@@ -1179,7 +1380,7 @@ cleanup_critical:
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
+ BR_R11USE_R14
.Lcleanup_io_restore_insn:
.quad .Lio_exit_timer
.quad .Lio_done - 4
@@ -1232,17 +1433,17 @@ cleanup_critical:
# prepare return psw
nihh %r8,0xfcfd # clear irq & wait state bits
lg %r9,48(%r11) # return from psw_idle
- br %r14
+ BR_R11USE_R14
.Lcleanup_idle_insn:
.quad .Lpsw_idle_lpsw
.Lcleanup_save_fpu_regs:
larl %r9,save_fpu_regs
- br %r14
+ BR_R11USE_R14
.Lcleanup_load_fpu_regs:
larl %r9,load_fpu_regs
- br %r14
+ BR_R11USE_R14
/*
* Integer constants
@@ -1258,7 +1459,6 @@ cleanup_critical:
.Lsie_critical_length:
.quad .Lsie_done - .Lsie_gmap
#endif
-
.section .rodata, "a"
#define SYSCALL(esame,emu) .long esame
.globl sys_call_table
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 295bfb7124bc..df49f2a1a7e5 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -563,6 +563,7 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
+ __bpon();
diag308(DIAG308_LOAD_CLEAR, NULL);
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
@@ -798,6 +799,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
/* copy and convert to ebcdic */
memcpy(ipb->hdr.loadparm, buf, lp_len);
ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
+ ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
return len;
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index fbc07891f9e7..64ccfdf96b32 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,9 @@
#include <linux/kernel.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
+#include <asm/facility.h>
#if 0
#define DEBUGP printk
@@ -167,7 +170,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
me->arch.got_offset = me->core_layout.size;
me->core_layout.size += me->arch.got_size;
me->arch.plt_offset = me->core_layout.size;
- me->core_layout.size += me->arch.plt_size;
+ if (me->arch.plt_size) {
+ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
+ me->arch.plt_size += PLT_ENTRY_SIZE;
+ me->core_layout.size += me->arch.plt_size;
+ }
return 0;
}
@@ -321,9 +328,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
unsigned int *ip;
ip = me->core_layout.base + me->arch.plt_offset +
info->plt_offset;
- ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
- ip[1] = 0x100a0004;
- ip[2] = 0x07f10000;
+ ip[0] = 0x0d10e310; /* basr 1,0 */
+ ip[1] = 0x100a0004; /* lg 1,10(1) */
+ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
+ unsigned int *ij;
+ ij = me->core_layout.base +
+ me->arch.plt_offset +
+ me->arch.plt_size - PLT_ENTRY_SIZE;
+ ip[2] = 0xa7f40000 + /* j __jump_r1 */
+ (unsigned int)(u16)
+ (((unsigned long) ij - 8 -
+ (unsigned long) ip) / 2);
+ } else {
+ ip[2] = 0x07f10000; /* br %r1 */
+ }
ip[3] = (unsigned int) (val >> 32);
ip[4] = (unsigned int) val;
info->plt_initialized = 1;
@@ -428,6 +446,45 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
+ const Elf_Shdr *s;
+ char *secstrings, *secname;
+ void *aseg;
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ !nospec_disable && me->arch.plt_size) {
+ unsigned int *ij;
+
+ ij = me->core_layout.base + me->arch.plt_offset +
+ me->arch.plt_size - PLT_ENTRY_SIZE;
+ if (test_facility(35)) {
+ ij[0] = 0xc6000000; /* exrl %r0,.+10 */
+ ij[1] = 0x0005a7f4; /* j . */
+ ij[2] = 0x000007f1; /* br %r1 */
+ } else {
+ ij[0] = 0x44000000 | (unsigned int)
+ offsetof(struct lowcore, br_r1_trampoline);
+ ij[1] = 0xa7f40000; /* j . */
+ }
+ }
+
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ aseg = (void *) s->sh_addr;
+ secname = secstrings + s->sh_name;
+
+ if (!strcmp(".altinstructions", secname))
+ /* patch .altinstructions */
+ apply_alternatives(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ (!strncmp(".s390_indirect", secname, 14)))
+ nospec_revert(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ (!strncmp(".s390_return", secname, 12)))
+ nospec_revert(aseg, aseg + s->sh_size);
+ }
+
jump_label_apply_nops(me);
return 0;
}
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
new file mode 100644
index 000000000000..9f3b5b382743
--- /dev/null
+++ b/arch/s390/kernel/nospec-branch.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/device.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+static int __init nobp_setup_early(char *str)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(str, &enabled);
+ if (rc)
+ return rc;
+ if (enabled && test_facility(82)) {
+ /*
+ * The user explicitly requested nobp=1, enable it and
+ * disable the expoline support.
+ */
+ __set_facility(82, S390_lowcore.alt_stfle_fac_list);
+ if (IS_ENABLED(CONFIG_EXPOLINE))
+ nospec_disable = 1;
+ } else {
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ return 0;
+}
+early_param("nobp", nobp_setup_early);
+
+static int __init nospec_setup_early(char *str)
+{
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ return 0;
+}
+early_param("nospec", nospec_setup_early);
+
+static int __init nospec_report(void)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ pr_info("Spectre V2 mitigation: execute trampolines.\n");
+ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ pr_info("Spectre V2 mitigation: limited branch prediction.\n");
+ return 0;
+}
+arch_initcall(nospec_report);
+
+#ifdef CONFIG_SYSFS
+ssize_t cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ return sprintf(buf, "Mitigation: execute trampolines\n");
+ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ return sprintf(buf, "Mitigation: limited branch prediction.\n");
+ return sprintf(buf, "Vulnerable\n");
+}
+#endif
+
+#ifdef CONFIG_EXPOLINE
+
+int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+
+static int __init nospectre_v2_setup_early(char *str)
+{
+ nospec_disable = 1;
+ return 0;
+}
+early_param("nospectre_v2", nospectre_v2_setup_early);
+
+void __init nospec_auto_detect(void)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE)) {
+ /*
+ * The kernel has been compiled with expolines.
+ * Keep expolines enabled and disable nobp.
+ */
+ nospec_disable = 0;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ /*
+ * If the kernel has not been compiled with expolines the
+ * nobp setting decides what is done; this depends on the
+ * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
+ */
+}
+
+static int __init spectre_v2_setup_early(char *str)
+{
+ if (str && !strncmp(str, "on", 2)) {
+ nospec_disable = 0;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ if (str && !strncmp(str, "off", 3))
+ nospec_disable = 1;
+ if (str && !strncmp(str, "auto", 4))
+ nospec_auto_detect();
+ return 0;
+}
+early_param("spectre_v2", spectre_v2_setup_early);
+
+static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+{
+ enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+ u8 *instr, *thunk, *br;
+ u8 insnbuf[6];
+ s32 *epo;
+
+ /* Second part of the instruction replace is always a nop */
+ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
+ for (epo = start; epo < end; epo++) {
+ instr = (u8 *) epo + *epo;
+ if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+ type = BRCL_EXPOLINE; /* brcl instruction */
+ else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
+ type = BRASL_EXPOLINE; /* brasl instruction */
+ else
+ continue;
+ thunk = instr + (*(int *)(instr + 2)) * 2;
+ if (thunk[0] == 0xc6 && thunk[1] == 0x00)
+ /* exrl %r0,<target-br> */
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
+ thunk[6] == 0x44 && thunk[7] == 0x00 &&
+ (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
+ (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
+ /* larl %rx,<target br> + ex %r0,0(%rx) */
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else
+ continue;
+ if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
+ continue;
+ switch (type) {
+ case BRCL_EXPOLINE:
+ /* brcl to thunk, replace with br + nop */
+ insnbuf[0] = br[0];
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+ break;
+ case BRASL_EXPOLINE:
+ /* brasl to thunk, replace with basr + nop */
+ insnbuf[0] = 0x0d;
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+ break;
+ }
+
+ s390_kernel_write(instr, insnbuf, 6);
+ }
+}
+
+void __init_or_module nospec_revert(s32 *start, s32 *end)
+{
+ if (nospec_disable)
+ __nospec_revert(start, end);
+}
+
+extern s32 __nospec_call_start[], __nospec_call_end[];
+extern s32 __nospec_return_start[], __nospec_return_end[];
+void __init nospec_init_branches(void)
+{
+ nospec_revert(__nospec_call_start, __nospec_call_end);
+ nospec_revert(__nospec_return_start, __nospec_return_end);
+}
+
+#endif /* CONFIG_EXPOLINE */
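The .s390_indirect and .s390_return sections that __nospec_revert() walks (collected by the vmlinux.lds.S hunk further down) are tables of self-relative 32-bit offsets: each entry stores the distance from the entry itself to the patch site, which is why the code recovers the site with instr = (u8 *) epo + *epo. A minimal standalone C sketch of that addressing scheme, purely illustrative and not kernel code:

#include <stdio.h>
#include <stdint.h>

/* Two pretend "patch sites" somewhere in the image. */
static unsigned char site_a = 0xc0, site_b = 0x07;

/*
 * Self-relative table: each entry holds (site - &entry) as a 32-bit
 * signed offset, in the spirit of the .s390_indirect/.s390_return entries.
 */
static int32_t table[2];

int main(void)
{
	table[0] = (int32_t)((intptr_t)&site_a - (intptr_t)&table[0]);
	table[1] = (int32_t)((intptr_t)&site_b - (intptr_t)&table[1]);

	for (int32_t *epo = table; epo < table + 2; epo++) {
		unsigned char *instr = (unsigned char *)epo + *epo;

		printf("entry %td resolves to byte 0x%02x\n",
		       epo - table, *instr);
	}
	return 0;
}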
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 81d0808085e6..d856263fd768 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -179,3 +179,21 @@ const struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = show_cpuinfo,
};
+
+int s390_isolate_bp(void)
+{
+ if (!test_facility(82))
+ return -EOPNOTSUPP;
+ set_thread_flag(TIF_ISOLATE_BP);
+ return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp);
+
+int s390_isolate_bp_guest(void)
+{
+ if (!test_facility(82))
+ return -EOPNOTSUPP;
+ set_thread_flag(TIF_ISOLATE_BP_GUEST);
+ return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp_guest);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e974e53ab597..feb9d97a9d14 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -63,6 +63,8 @@
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
#include "entry.h"
/*
@@ -335,7 +337,9 @@ static void __init setup_lowcore(void)
lc->machine_flags = S390_lowcore.machine_flags;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- MAX_FACILITY_BIT/8);
+ sizeof(lc->stfle_fac_list));
+ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+ sizeof(lc->alt_stfle_fac_list));
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
@@ -372,6 +376,7 @@ static void __init setup_lowcore(void)
#ifdef CONFIG_SMP
lc->spinlock_lockval = arch_spin_lockval(0);
#endif
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
@@ -871,6 +876,9 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
+ if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
+ nospec_auto_detect();
+
parse_early_param();
#ifdef CONFIG_CRASH_DUMP
/* Deactivate elfcorehdr= kernel parameter */
@@ -931,6 +939,10 @@ void __init setup_arch(char **cmdline_p)
conmode_default();
set_preferred_console();
+ apply_alternative_instructions();
+ if (IS_ENABLED(CONFIG_EXPOLINE))
+ nospec_init_branches();
+
/* Setup zfcpdump support */
setup_zfcpdump();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 35531fe1c5ea..0a31110f41f6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -205,6 +205,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
@@ -253,7 +254,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
__ctl_store(lc->cregs_save_area, 0, 15);
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- MAX_FACILITY_BIT/8);
+ sizeof(lc->stfle_fac_list));
+ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+ sizeof(lc->alt_stfle_fac_list));
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
@@ -302,6 +305,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
mem_assign_absolute(lc->restart_fn, (unsigned long) func);
mem_assign_absolute(lc->restart_data, (unsigned long) data);
mem_assign_absolute(lc->restart_source, source_cpu);
+ __bpon();
asm volatile(
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
@@ -875,6 +879,7 @@ void __cpu_die(unsigned int cpu)
void __noreturn cpu_die(void)
{
idle_task_exit();
+ __bpon();
pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ;
}
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 66956c09d5bf..3d04dfdabc9f 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -147,6 +147,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
return orig;
}
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return user_stack_pointer(regs) <= ret->stack;
+ else
+ return user_stack_pointer(regs) < ret->stack;
+}
+
/* Instruction Emulation */
static void adjust_psw_addr(psw_t *psw, unsigned long len)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 3667d20e997f..dd96b467946b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,8 +31,14 @@ SECTIONS
{
. = 0x00000000;
.text : {
- _text = .; /* Text and read-only data */
+ /* Text and read-only data */
HEAD_TEXT
+ /*
+ * E.g. perf doesn't like symbols starting at address zero,
+ * therefore skip the initial PSW and channel program located
+ * at address zero and let _text start at 0x200.
+ */
+ _text = 0x200;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
@@ -93,6 +99,43 @@ SECTIONS
EXIT_DATA
}
+ /*
+ * struct alt_inst entries. From the header (alternative.h):
+ * "Alternative instructions for different CPU types or capabilities"
+ * Think locking instructions on spinlocks.
+ * Note, that it is a part of __init region.
+ */
+ . = ALIGN(8);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
+ /*
+ * And here are the replacement instructions. The linker sticks
+ * them as binary blobs. The .altinstructions has enough data to
+ * get the address and the length of them to patch the kernel safely.
+ * Note, that it is a part of __init region.
+ */
+ .altinstr_replacement : {
+ *(.altinstr_replacement)
+ }
+
+ /*
+ * Table with the patch locations to undo expolines
+ */
+ .nospec_call_table : {
+ __nospec_call_start = . ;
+ *(.s390_indirect*)
+ __nospec_call_end = . ;
+ }
+ .nospec_return_table : {
+ __nospec_return_start = . ;
+ *(.s390_return*)
+ __nospec_return_end = . ;
+ }
+
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 5ba494ed18c1..2032ab81b2d7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -401,6 +401,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_RI:
r = test_facility(64);
break;
+ case KVM_CAP_S390_BPB:
+ r = test_facility(82);
+ break;
default:
r = 0;
}
@@ -1601,6 +1604,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
/* we still need the basic sca for the ipte control */
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+ return;
}
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
@@ -1712,6 +1716,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm_s390_set_prefix(vcpu, 0);
if (test_kvm_facility(vcpu->kvm, 64))
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+ if (test_kvm_facility(vcpu->kvm, 82))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
/* fprs can be synchronized via vrs, even if the guest has no vx. With
* MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
*/
@@ -1828,7 +1834,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (test_fp_ctl(current->thread.fpu.fpc))
/* User space provided an invalid FPC, let's clear it */
current->thread.fpu.fpc = 0;
-
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.enabled_gmap);
@@ -1876,6 +1881,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
current->thread.fpu.fpc = 0;
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu);
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -2743,6 +2749,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (riccb->valid)
vcpu->arch.sie_block->ecb3 |= 0x01;
}
+ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+ test_kvm_facility(vcpu->kvm, 82)) {
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+ vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+ }
kvm_run->kvm_dirty_regs = 0;
}
@@ -2761,6 +2772,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+ kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d8673e243f13..ced6c9b8f04d 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -217,6 +217,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
memcpy(scb_o->gcr, scb_s->gcr, 128);
scb_o->pp = scb_s->pp;
+ /* branch prediction */
+ if (test_kvm_facility(vcpu->kvm, 82)) {
+ scb_o->fpf &= ~FPF_BPBC;
+ scb_o->fpf |= scb_s->fpf & FPF_BPBC;
+ }
+
/* interrupt intercept */
switch (scb_s->icptcode) {
case ICPT_PROGI:
@@ -259,6 +265,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->ecb3 = 0;
scb_s->ecd = 0;
scb_s->fac = 0;
+ scb_s->fpf = 0;
rc = prepare_cpuflags(vcpu, vsie_page);
if (rc)
@@ -316,6 +323,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
prefix_unmapped(vsie_page);
scb_s->ecb |= scb_o->ecb & 0x10U;
}
+ /* branch prediction */
+ if (test_kvm_facility(vcpu->kvm, 82))
+ scb_s->fpf |= scb_o->fpf & FPF_BPBC;
/* SIMD */
if (test_kvm_facility(vcpu->kvm, 129)) {
scb_s->eca |= scb_o->eca & 0x00020000U;
@@ -754,6 +764,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ int guest_bp_isolation;
int rc;
handle_last_fault(vcpu, vsie_page);
@@ -764,6 +775,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
s390_handle_mcck();
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
+ /* save current guest state of bp isolation override */
+ guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
+
+ /*
+ * The guest is running with BPBC, so we have to force it on for our
+ * nested guest. This is done by enabling BPBC globally, so the BPBC
+ * control in the SCB (which the nested guest can modify) is simply
+ * ignored.
+ */
+ if (test_kvm_facility(vcpu->kvm, 82) &&
+ vcpu->arch.sie_block->fpf & FPF_BPBC)
+ set_thread_flag(TIF_ISOLATE_BP_GUEST);
+
local_irq_disable();
guest_enter_irqoff();
local_irq_enable();
@@ -773,6 +798,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
local_irq_disable();
guest_exit_irqoff();
local_irq_enable();
+
+ /* restore guest state for bp isolation override */
+ if (!guest_bp_isolation)
+ clear_thread_flag(TIF_ISOLATE_BP_GUEST);
+
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
if (rc > 0)
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 658326f44df8..5e0267624d8d 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -8,6 +8,7 @@
*/
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/sh_eth.h>
#include <mach-se/mach/se.h>
#include <mach-se/mach/mrshpc.h>
#include <asm/machvec.h>
@@ -114,6 +115,11 @@ static struct platform_device heartbeat_device = {
#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
defined(CONFIG_CPU_SUBTYPE_SH7712)
/* SH771X Ethernet driver */
+static struct sh_eth_plat_data sh_eth_plat = {
+ .phy = PHY_ID,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
static struct resource sh_eth0_resources[] = {
[0] = {
.start = SH_ETH0_BASE,
@@ -131,7 +137,7 @@ static struct platform_device sh_eth0_device = {
.name = "sh771x-ether",
.id = 0,
.dev = {
- .platform_data = PHY_ID,
+ .platform_data = &sh_eth_plat,
},
.num_resources = ARRAY_SIZE(sh_eth0_resources),
.resource = sh_eth0_resources,
@@ -154,7 +160,7 @@ static struct platform_device sh_eth1_device = {
.name = "sh771x-ether",
.id = 1,
.dev = {
- .platform_data = PHY_ID,
+ .platform_data = &sh_eth_plat,
},
.num_resources = ARRAY_SIZE(sh_eth1_resources),
.resource = sh_eth1_resources,
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index d0078747d308..8f8cf941a8cd 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -27,21 +27,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
}
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- u32 oparg = (encoded_op << 8) >> 20;
- u32 cmparg = (encoded_op << 20) >> 20;
u32 oldval, newval, prev;
int ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
do {
@@ -80,17 +71,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = ((int)oldval < (int)cmparg); break;
- case FUTEX_OP_CMP_GE: ret = ((int)oldval >= (int)cmparg); break;
- case FUTEX_OP_CMP_LE: ret = ((int)oldval <= (int)cmparg); break;
- case FUTEX_OP_CMP_GT: ret = ((int)oldval > (int)cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
return ret;
}
diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h
index 4e899b0dabf7..1cfd89d92208 100644
--- a/arch/sparc/include/asm/futex_64.h
+++ b/arch/sparc/include/asm/futex_64.h
@@ -29,22 +29,14 @@
: "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
: "memory")
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
- return -EFAULT;
if (unlikely((((unsigned long) uaddr) & 0x3UL)))
return -EINVAL;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
pagefault_disable();
switch (op) {
@@ -69,17 +61,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 59d503866431..9cc600b2d68c 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1733,9 +1733,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
lp->rcv_nxt = p->seqid;
+ /*
+ * If this is a control-only packet, there is nothing
+ * else to do but advance the rx queue since the packet
+ * was already processed above.
+ */
if (!(p->type & LDC_DATA)) {
new = rx_advance(lp, new);
- goto no_data;
+ break;
}
if (p->stype & (LDC_ACK | LDC_NACK)) {
err = data_ack_nack(lp, p);
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index e64a1b75fc38..83c1e639b411 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -106,12 +106,9 @@
lock = __atomic_hashed_lock((int __force *)uaddr)
#endif
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int uninitialized_var(val), ret;
__futex_prolog();
@@ -119,12 +116,6 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
/* The 32-bit futex code makes this assumption, so validate it here. */
BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -148,30 +139,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (val == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (val != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (val < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (val >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (val <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (val > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = val;
+
return ret;
}
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index 2db18cbbb0ea..c0197097c86e 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -12,6 +12,7 @@
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/stat.h>
+#include <sys/sysmacros.h>
#include <sys/un.h>
#include <sys/types.h>
#include <os.h>
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index a86d7cc2c2d8..bf0acb8aad8b 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -16,6 +16,7 @@
#include <os.h>
#include <sysdep/mcontext.h>
#include <um_malloc.h>
+#include <sys/ucontext.h>
void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
[SIGTRAP] = relay_signal,
@@ -159,7 +160,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
static void hard_handler(int sig, siginfo_t *si, void *p)
{
- struct ucontext *uc = p;
+ ucontext_t *uc = p;
mcontext_t *mc = &uc->uc_mcontext;
unsigned long pending = 1UL << sig;
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index cd22cb8ebd42..f408babdf746 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -172,6 +172,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr)
LDFLAGS := -m elf_$(UTS_MACHINE)
+#
+# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to
+# the linker to force 2MB page size regardless of the default page size used
+# by the linker.
+#
+ifdef CONFIG_X86_64
+LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
+endif
+
# Speed up the build
KBUILD_CFLAGS += -pipe
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
@@ -184,7 +193,10 @@ KBUILD_AFLAGS += $(mflags-y)
# Avoid indirect branches in kernel to deal with Spectre
ifdef CONFIG_RETPOLINE
- RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+ RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+
+ RETPOLINE_CFLAGS += $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
ifneq ($(RETPOLINE_CFLAGS),)
KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
endif
diff --git a/arch/x86/boot/compressed/error.h b/arch/x86/boot/compressed/error.h
index 2e59dac07f9e..d732e608e3af 100644
--- a/arch/x86/boot/compressed/error.h
+++ b/arch/x86/boot/compressed/error.h
@@ -1,7 +1,9 @@
#ifndef BOOT_COMPRESSED_ERROR_H
#define BOOT_COMPRESSED_ERROR_H
+#include <linux/compiler.h>
+
void warn(char *m);
-void error(char *m);
+void error(char *m) __noreturn;
#endif /* BOOT_COMPRESSED_ERROR_H */
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 6de58f1bd7ec..eb5ff66b4d7a 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -460,10 +460,17 @@ void choose_random_location(unsigned long input,
add_identity_map(random_addr, output_size);
*output = random_addr;
}
+
+ /*
+ * This loads the identity mapping page table.
+ * This should only be done if a new physical address
+ * is found for the kernel; otherwise keep the old page
+ * table so the behaviour matches the "nokaslr" case.
+ */
+ finalize_identity_maps();
}
- /* This actually loads the identity pagetable on x86_64. */
- finalize_identity_maps();
/* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
if (IS_ENABLED(CONFIG_X86_64))
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index c945acd8fa33..d86e68d3c794 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -299,6 +299,10 @@ static void parse_elf(void *output)
switch (phdr->p_type) {
case PT_LOAD:
+#ifdef CONFIG_X86_64
+ if ((phdr->p_align % 0x200000) != 0)
+ error("Alignment of LOAD segment isn't multiple of 2MB");
+#endif
#ifdef CONFIG_RELOCATABLE
dest = output;
dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index d7699130ee36..1e546e38ded9 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -66,8 +66,6 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
int err;
- fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
-
err = blkcipher_walk_virt(desc, walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -79,6 +77,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
+ fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
do {
fn(ctx, wdst, wsrc);
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 5a76cad3028b..881774ba8732 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -237,8 +237,7 @@ ENTRY(__switch_to_asm)
* exist, overwrite the RSB with entries which capture
* speculative execution to prevent attack.
*/
- /* Clobbers %ebx */
- FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
/* restore callee-saved registers */
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 5395b86cff3e..4ecd24b3966a 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -176,13 +176,26 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
pushq %r8 /* pt_regs->r8 */
pushq %r9 /* pt_regs->r9 */
pushq %r10 /* pt_regs->r10 */
+ /*
+ * Clear extra registers that a speculation attack might
+ * otherwise want to exploit. Interleave XOR with PUSH
+ * for better uop scheduling:
+ */
+ xorq %r10, %r10 /* nospec r10 */
pushq %r11 /* pt_regs->r11 */
+ xorq %r11, %r11 /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
pushq %rbp /* pt_regs->rbp */
+ xorl %ebp, %ebp /* nospec rbp */
pushq %r12 /* pt_regs->r12 */
+ xorq %r12, %r12 /* nospec r12 */
pushq %r13 /* pt_regs->r13 */
+ xorq %r13, %r13 /* nospec r13 */
pushq %r14 /* pt_regs->r14 */
+ xorq %r14, %r14 /* nospec r14 */
pushq %r15 /* pt_regs->r15 */
+ xorq %r15, %r15 /* nospec r15 */
/* IRQs are off. */
movq %rsp, %rdi
@@ -318,8 +331,7 @@ ENTRY(__switch_to_asm)
* exist, overwrite the RSB with entries which capture
* speculative execution to prevent attack.
*/
- /* Clobbers %rbx */
- FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
/* restore callee-saved registers */
@@ -949,7 +961,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
#endif /* CONFIG_HYPERV */
idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
+idtentry int3 do_int3 has_error_code=0
idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index f73796db8758..02e547f9ca3f 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -26,6 +26,7 @@
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/nospec.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
@@ -303,17 +304,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
config = attr->config;
- cache_type = (config >> 0) & 0xff;
+ cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
+ cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
+ cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
+ cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
val = hw_cache_event_ids[cache_type][cache_op][cache_result];
@@ -420,6 +424,8 @@ int x86_setup_perfctr(struct perf_event *event)
if (attr->config >= x86_pmu.max_events)
return -EINVAL;
+ attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
+
/*
* The generic map:
*/
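The array_index_nospec() calls added above apply the usual Spectre v1 hardening pattern: after the architectural bounds check, the index is clamped with a branchless mask so that a mispredicted check cannot be used to index out of bounds. A small userspace sketch of the masking idea follows; the mask construction resembles the generic C fallback and relies on an arithmetic right shift, but it is only an illustration, not the <linux/nospec.h> implementation:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

/*
 * All ones when index < size, all zeroes otherwise, computed without a
 * conditional branch that the CPU could mispredict.
 */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

int main(void)
{
	int table[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	unsigned long size = 8, idx;

	for (idx = 0; idx < 12; idx++) {
		/* Out-of-range indices collapse to 0, which stays in bounds. */
		unsigned long safe = idx & index_mask_nospec(idx, size);

		if (idx < size)		/* architectural bounds check */
			printf("table[%lu] = %d\n", safe, table[safe]);
	}
	return 0;
}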
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 0bd0c1cc3228..6f353a874178 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3025,7 +3025,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
X86_CONFIG(.event=0xc0, .umask=0x01)) {
if (left < 128)
left = 128;
- left &= ~0x3fu;
+ left &= ~0x3fULL;
}
return left;
}
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 1076c9a77292..47d526c700a1 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -90,6 +90,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
+#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
@@ -300,6 +301,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
} else if (event->pmu == &cstate_pkg_pmu) {
if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
return -EINVAL;
+ cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
if (!pkg_msr[cfg].attr)
return -EINVAL;
event->hw.event_base = pkg_msr[cfg].msr;
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index afe8024e9e95..6bc36944a8c1 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3522,24 +3522,27 @@ static struct intel_uncore_type *skx_msr_uncores[] = {
NULL,
};
+/*
+ * The number of CHAs is determined by reading bits 27:0 of the CAPID6
+ * register, which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
+ */
+#define SKX_CAPID6 0x9c
+#define SKX_CHA_BIT_MASK GENMASK(27, 0)
+
static int skx_count_chabox(void)
{
- struct pci_dev *chabox_dev = NULL;
- int bus, count = 0;
+ struct pci_dev *dev = NULL;
+ u32 val = 0;
- while (1) {
- chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev);
- if (!chabox_dev)
- break;
- if (count == 0)
- bus = chabox_dev->bus->number;
- if (bus != chabox_dev->bus->number)
- break;
- count++;
- }
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
+ if (!dev)
+ goto out;
- pci_dev_put(chabox_dev);
- return count;
+ pci_read_config_dword(dev, SKX_CAPID6, &val);
+ val &= SKX_CHA_BIT_MASK;
+out:
+ pci_dev_put(dev);
+ return hweight32(val);
}
void skx_uncore_cpu_init(void)
@@ -3566,7 +3569,7 @@ static struct intel_uncore_type skx_uncore_imc = {
};
static struct attribute *skx_upi_uncore_formats_attr[] = {
- &format_attr_event_ext.attr,
+ &format_attr_event.attr,
&format_attr_umask_ext.attr,
&format_attr_edge.attr,
&format_attr_inv.attr,
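The rewritten skx_count_chabox() above no longer walks PCI devices to count CHA boxes; it reads the CAPID6 capability register once and takes the population count of bits 27:0. A hedged sketch of just that mask-and-popcount step, with the PCI config read replaced by a hardcoded example value:

#include <stdio.h>
#include <stdint.h>

#define SKX_CHA_BIT_MASK ((1u << 28) - 1)	/* bits 27:0 */

/* Kernighan popcount: clear the lowest set bit each iteration. */
static unsigned int popcount32(uint32_t v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

int main(void)
{
	uint32_t capid6 = 0x0003ffff;	/* example: 18 CHAs present */

	printf("CHA count = %u\n", popcount32(capid6 & SKX_CHA_BIT_MASK));
	return 0;
}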
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 4bb3ec69e8ea..be0b1968d60a 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -1,4 +1,5 @@
#include <linux/perf_event.h>
+#include <linux/nospec.h>
#include <asm/intel-family.h>
enum perf_msr_id {
@@ -136,9 +137,6 @@ static int msr_event_init(struct perf_event *event)
if (event->attr.type != event->pmu->type)
return -ENOENT;
- if (cfg >= PERF_MSR_EVENT_MAX)
- return -EINVAL;
-
/* unsupported modes and filters */
if (event->attr.exclude_user ||
event->attr.exclude_kernel ||
@@ -149,6 +147,11 @@ static int msr_event_init(struct perf_event *event)
event->attr.sample_period) /* no sampling */
return -EINVAL;
+ if (cfg >= PERF_MSR_EVENT_MAX)
+ return -EINVAL;
+
+ cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
+
if (!msr[cfg].attr)
return -EINVAL;
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index 93eebc636c76..46e40aeae446 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -6,6 +6,8 @@
#ifndef _ASM_X86_MACH_DEFAULT_APM_H
#define _ASM_X86_MACH_DEFAULT_APM_H
+#include <asm/nospec-branch.h>
+
#ifdef APM_ZERO_SEGS
# define APM_DO_ZERO_SEGS \
"pushl %%ds\n\t" \
@@ -31,6 +33,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
+ firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -43,6 +46,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
"=S" (*esi)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
+ firmware_restrict_branch_speculation_end();
}
static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -55,6 +59,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
+ firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -67,6 +72,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
"=S" (si)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
+ firmware_restrict_branch_speculation_end();
return error;
}
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 166654218329..5a25ada75aeb 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -37,7 +37,4 @@ INDIRECT_THUNK(dx)
INDIRECT_THUNK(si)
INDIRECT_THUNK(di)
INDIRECT_THUNK(bp)
-asmlinkage void __fill_rsb(void);
-asmlinkage void __clear_rsb(void);
-
#endif /* CONFIG_RETPOLINE */
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 7bb29a416b77..8d8c24f3a963 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -34,6 +34,7 @@
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
+#define _ASM_MUL __ASM_SIZE(mul)
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
@@ -128,6 +129,7 @@
#endif
#ifndef __ASSEMBLY__
+#ifndef __BPF__
/*
* This output constraint should be used for any inline asm which has a "call"
* instruction. Otherwise the asm may be inserted before the frame pointer
@@ -137,5 +139,6 @@
register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
#endif
+#endif
#endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 8eb23f5cf7f4..a2485311164b 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -203,6 +203,7 @@
#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -301,6 +302,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 389d700b961e..9df22bb07f7f 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -5,6 +5,7 @@
#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
+#include <asm/nospec-branch.h>
/*
* We map the EFI regions needed for runtime services non-contiguously,
@@ -35,8 +36,18 @@
extern unsigned long asmlinkage efi_call_phys(void *, ...);
-#define arch_efi_call_virt_setup() kernel_fpu_begin()
-#define arch_efi_call_virt_teardown() kernel_fpu_end()
+#define arch_efi_call_virt_setup() \
+({ \
+ kernel_fpu_begin(); \
+ firmware_restrict_branch_speculation_start(); \
+})
+
+#define arch_efi_call_virt_teardown() \
+({ \
+ firmware_restrict_branch_speculation_end(); \
+ kernel_fpu_end(); \
+})
+
/*
* Wrap all the virtual calls in a way that forces the parameters on the stack.
@@ -72,6 +83,7 @@ struct efi_scratch {
efi_sync_low_kernel_mappings(); \
preempt_disable(); \
__kernel_fpu_begin(); \
+ firmware_restrict_branch_speculation_start(); \
\
if (efi_scratch.use_pgd) { \
efi_scratch.prev_cr3 = read_cr3(); \
@@ -90,6 +102,7 @@ struct efi_scratch {
__flush_tlb_all(); \
} \
\
+ firmware_restrict_branch_speculation_end(); \
__kernel_fpu_end(); \
preempt_enable(); \
})
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index b4c1f5453436..f4dc9b63bdda 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -41,20 +41,11 @@
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -80,30 +71,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
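The futex conversions above (sh, sparc, tile and x86 all get the same treatment) split the old helper in two: the per-arch arch_futex_atomic_op_inuser() now only performs the atomic operation and returns the old value through *oval, while decoding of the encoded op and the FUTEX_OP_CMP_* comparison happen once in common code. A standalone sketch of that split; the structure is approximate and does not reproduce the exact kernel/futex.c wrapper:

#include <stdio.h>
#include <stdatomic.h>

enum { OP_SET, OP_ADD };		/* illustrative subset of ops */
enum { CMP_EQ, CMP_GT };		/* illustrative subset of comparisons */

/* "Arch" side: perform the atomic op, report the old value via *oval. */
static int arch_futex_op(int op, int oparg, int *oval, atomic_int *uaddr)
{
	switch (op) {
	case OP_SET:
		*oval = atomic_exchange(uaddr, oparg);
		return 0;
	case OP_ADD:
		*oval = atomic_fetch_add(uaddr, oparg);
		return 0;
	default:
		return -1;		/* -ENOSYS in the kernel */
	}
}

/* "Common" side: run the op, then evaluate the comparison exactly once. */
static int futex_op_inuser(int op, int oparg, int cmp, int cmparg,
			   atomic_int *uaddr)
{
	int oldval, ret = arch_futex_op(op, oparg, &oldval, uaddr);

	if (ret)
		return ret;
	switch (cmp) {
	case CMP_EQ: return oldval == cmparg;
	case CMP_GT: return oldval > cmparg;
	default:     return -1;
	}
}

int main(void)
{
	atomic_int word = 3;

	printf("wake? %d\n", futex_op_inuser(OP_ADD, 2, CMP_GT, 2, &word));
	printf("word = %d\n", atomic_load(&word));
	return 0;
}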
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 8b272a08d1a8..e2e09347ee3c 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -3,12 +3,18 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/atomic.h>
/*
- * The x86 doesn't have a mmu context, but
- * we put the segment information here.
+ * x86 has arch-specific MMU state beyond what lives in mm_struct.
*/
typedef struct {
+ /*
+ * ctx_id uniquely identifies this mm_struct. A ctx_id will never
+ * be reused, and zero is not a valid ctx_id.
+ */
+ u64 ctx_id;
+
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
#endif
@@ -33,6 +39,11 @@ typedef struct {
#endif
} mm_context_t;
+#define INIT_MM_CONTEXT(mm) \
+ .context = { \
+ .ctx_id = 1, \
+ }
+
void leave_mm(int cpu);
#endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index d23e35584f15..5a295bb97103 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -12,6 +12,9 @@
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
+
+extern atomic64_t last_mm_ctx_id;
+
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
@@ -106,6 +109,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
+
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
/* pkey 0 is the default and always allocated */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 76b058533e47..f928ad9b143f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -8,6 +8,50 @@
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't become empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS 16 /* To avoid underflow */
+
+/*
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version — two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp) \
+ mov $(nr/2), reg; \
+771: \
+ call 772f; \
+773: /* speculation trap */ \
+ pause; \
+ lfence; \
+ jmp 773b; \
+772: \
+ call 774f; \
+775: /* speculation trap */ \
+ pause; \
+ lfence; \
+ jmp 775b; \
+774: \
+ dec reg; \
+ jnz 771b; \
+ add $(BITS_PER_LONG/8) * nr, sp;
+
#ifdef __ASSEMBLY__
/*
@@ -24,6 +68,18 @@
.endm
/*
+ * This should be used immediately before an indirect jump/call. It tells
+ * objtool the subsequent indirect jump/call is vouched safe for retpoline
+ * builds.
+ */
+.macro ANNOTATE_RETPOLINE_SAFE
+ .Lannotate_\@:
+ .pushsection .discard.retpoline_safe
+ _ASM_PTR .Lannotate_\@
+ .popsection
+.endm
+
+/*
* These are the bare retpoline primitives for indirect jmp and call.
* Do not use these directly; they only exist to make the ALTERNATIVE
* invocation below less ugly.
@@ -59,9 +115,9 @@
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
ANNOTATE_NOSPEC_ALTERNATIVE
- ALTERNATIVE_2 __stringify(jmp *\reg), \
+ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \
__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
- __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
jmp *\reg
#endif
@@ -70,18 +126,25 @@
.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
ANNOTATE_NOSPEC_ALTERNATIVE
- ALTERNATIVE_2 __stringify(call *\reg), \
+ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
- __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
call *\reg
#endif
.endm
-/* This clobbers the BX register */
-.macro FILL_RETURN_BUFFER nr:req ftr:req
+ /*
+ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+ * monstrosity above, manually.
+ */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
- ALTERNATIVE "", "call __clear_rsb", \ftr
+ ANNOTATE_NOSPEC_ALTERNATIVE
+ ALTERNATIVE "jmp .Lskip_rsb_\@", \
+ __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
+ \ftr
+.Lskip_rsb_\@:
#endif
.endm
@@ -93,6 +156,12 @@
".long 999b - .\n\t" \
".popsection\n\t"
+#define ANNOTATE_RETPOLINE_SAFE \
+ "999:\n\t" \
+ ".pushsection .discard.retpoline_safe\n\t" \
+ _ASM_PTR " 999b\n\t" \
+ ".popsection\n\t"
+
#if defined(CONFIG_X86_64) && defined(RETPOLINE)
/*
@@ -102,6 +171,7 @@
# define CALL_NOSPEC \
ANNOTATE_NOSPEC_ALTERNATIVE \
ALTERNATIVE( \
+ ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
"call __x86_indirect_thunk_%V[thunk_target]\n", \
X86_FEATURE_RETPOLINE)
@@ -113,7 +183,10 @@
* otherwise we'll run out of registers. We don't care about CET
* here, anyway.
*/
-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
+# define CALL_NOSPEC \
+ ALTERNATIVE( \
+ ANNOTATE_RETPOLINE_SAFE \
+ "call *%[thunk_target]\n", \
" jmp 904f;\n" \
" .align 16\n" \
"901: call 903f;\n" \
@@ -156,25 +229,90 @@ extern char __indirect_thunk_end[];
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
- alternative_input("",
- "call __fill_rsb",
- X86_FEATURE_RETPOLINE,
- ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
+ unsigned long loops;
+
+ asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+ ALTERNATIVE("jmp 910f",
+ __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+ X86_FEATURE_RETPOLINE)
+ "910:"
+ : "=r" (loops), ASM_CALL_CONSTRAINT
+ : : "memory" );
#endif
}
+#define alternative_msr_write(_msr, _val, _feature) \
+ asm volatile(ALTERNATIVE("", \
+ "movl %[msr], %%ecx\n\t" \
+ "movl %[val], %%eax\n\t" \
+ "movl $0, %%edx\n\t" \
+ "wrmsr", \
+ _feature) \
+ : : [msr] "i" (_msr), [val] "i" (_val) \
+ : "eax", "ecx", "edx", "memory")
+
static inline void indirect_branch_prediction_barrier(void)
{
- asm volatile(ALTERNATIVE("",
- "movl %[msr], %%ecx\n\t"
- "movl %[val], %%eax\n\t"
- "movl $0, %%edx\n\t"
- "wrmsr",
- X86_FEATURE_USE_IBPB)
- : : [msr] "i" (MSR_IA32_PRED_CMD),
- [val] "i" (PRED_CMD_IBPB)
- : "eax", "ecx", "edx", "memory");
+ alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
+ X86_FEATURE_USE_IBPB);
}
+/*
+ * With retpoline, we must use IBRS to restrict branch prediction
+ * before calling into firmware.
+ *
+ * (Implemented as CPP macros due to header hell.)
+ */
+#define firmware_restrict_branch_speculation_start() \
+do { \
+ preempt_disable(); \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
+ X86_FEATURE_USE_IBRS_FW); \
+} while (0)
+
+#define firmware_restrict_branch_speculation_end() \
+do { \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
+ X86_FEATURE_USE_IBRS_FW); \
+ preempt_enable(); \
+} while (0)
+
#endif /* __ASSEMBLY__ */
+
+/*
+ * Below is used in the eBPF JIT compiler and emits the byte sequence
+ * for the following assembly:
+ *
+ * With retpolines configured:
+ *
+ * callq do_rop
+ * spec_trap:
+ * pause
+ * lfence
+ * jmp spec_trap
+ * do_rop:
+ * mov %rax,(%rsp)
+ * retq
+ *
+ * Without retpolines configured:
+ *
+ * jmp *%rax
+ */
+#ifdef CONFIG_RETPOLINE
+# define RETPOLINE_RAX_BPF_JIT_SIZE 17
+# define RETPOLINE_RAX_BPF_JIT() \
+ EMIT1_off32(0xE8, 7); /* callq do_rop */ \
+ /* spec_trap: */ \
+ EMIT2(0xF3, 0x90); /* pause */ \
+ EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \
+ EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \
+ /* do_rop: */ \
+ EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \
+ EMIT1(0xC3); /* retq */
+#else
+# define RETPOLINE_RAX_BPF_JIT_SIZE 2
+# define RETPOLINE_RAX_BPF_JIT() \
+ EMIT2(0xFF, 0xE0); /* jmp *%rax */
+#endif
+
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ce932812f142..24af8b1de438 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -6,6 +6,7 @@
#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
+#include <asm/nospec-branch.h>
#include <asm/paravirt_types.h>
@@ -869,23 +870,27 @@ extern void default_banner(void);
#define INTERRUPT_RETURN \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
+ ANNOTATE_RETPOLINE_SAFE; \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
#define DISABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
+ ANNOTATE_RETPOLINE_SAFE; \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#define ENABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
+ ANNOTATE_RETPOLINE_SAFE; \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX \
push %ecx; push %edx; \
+ ANNOTATE_RETPOLINE_SAFE; \
call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
pop %edx; pop %ecx
#else /* !CONFIG_X86_32 */
@@ -907,11 +912,13 @@ extern void default_banner(void);
*/
#define SWAPGS \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
- call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
+ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
)
#define GET_CR2_INTO_RAX \
- call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
+ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
#define PARAVIRT_ADJUST_EXCEPTION_FRAME \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
@@ -921,7 +928,8 @@ extern void default_banner(void);
#define USERGS_SYSRET64 \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
CLBR_NONE, \
- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+ ANNOTATE_RETPOLINE_SAFE; \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
#endif /* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0f400c0e4979..04b79712b09c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -42,6 +42,7 @@
#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>
+#include <asm/nospec-branch.h>
struct page;
struct thread_struct;
@@ -391,7 +392,9 @@ int paravirt_disable_iospace(void);
* offset into the paravirt_patch_template structure, and can therefore be
* freely converted back into a structure offset.
*/
-#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
+#define PARAVIRT_CALL \
+ ANNOTATE_RETPOLINE_SAFE \
+ "call *%c[paravirt_opptr];"
/*
* These macros are intended to wrap calls through one of the paravirt
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 2cb1cc253d51..fc62ba8dce93 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -15,6 +15,7 @@ struct machine_ops {
};
extern struct machine_ops machine_ops;
+extern int crashing_cpu;
void native_machine_crash_shutdown(struct pt_regs *regs);
void native_machine_shutdown(void);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 94146f665a3c..99185a064978 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -68,6 +68,8 @@ static inline void invpcid_flush_all_nonglobals(void)
struct tlb_state {
struct mm_struct *active_mm;
int state;
+ /* last user mm's ctx id */
+ u64 last_ctx_id;
/*
* Access to this CR4 shadow and to H/W CR4 is protected by
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 6899cf187ba2..9cbfbef6a115 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -309,6 +309,7 @@ enum vmcs_field {
#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
+#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */
#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */
/* GUEST_INTERRUPTIBILITY_INFO flags. */
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c644a6..90ab9a795b49 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -1 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X64_MSGBUF_H
+#define __ASM_X64_MSGBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/msgbuf.h>
+#else
+/*
+ * The msqid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ __kernel_time_t msg_ctime; /* last change time */
+ __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
+ __kernel_ulong_t msg_qnum; /* number of messages in queue */
+ __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ __kernel_ulong_t __unused4;
+ __kernel_ulong_t __unused5;
+};
+
+#endif
+
+#endif /* __ASM_X64_MSGBUF_H */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc2de38..644421f3823b 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -1 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X86_SHMBUF_H
+#define __ASM_X86_SHMBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/shmbuf.h>
+#else
+/*
+ * The shmid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ __kernel_time_t shm_dtime; /* last detach time */
+ __kernel_time_t shm_ctime; /* last change time */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ __kernel_ulong_t shm_nattch; /* no. of current attaches */
+ __kernel_ulong_t __unused4;
+ __kernel_ulong_t __unused5;
+};
+
+struct shminfo64 {
+ __kernel_ulong_t shmmax;
+ __kernel_ulong_t shmmin;
+ __kernel_ulong_t shmmni;
+ __kernel_ulong_t shmseg;
+ __kernel_ulong_t shmall;
+ __kernel_ulong_t __unused1;
+ __kernel_ulong_t __unused2;
+ __kernel_ulong_t __unused3;
+ __kernel_ulong_t __unused4;
+};
+
+#endif
+
+#endif /* __ASM_X86_SHMBUF_H */
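The two headers above exist because of a layout mismatch: on x32, __kernel_time_t and __kernel_ulong_t are 64-bit even though __BITS_PER_LONG is 32, so the asm-generic 32-bit definitions (which, for a 32-bit ABI, still insert a padding word after each timestamp) would not match what an x86-64 kernel actually writes. A minimal standalone sketch, using hypothetical struct names and plain stdint types rather than the kernel headers, shows how the leftover padding word shifts later fields:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* What the asm-generic header would hand an x32 process: a 64-bit
 * timestamp still followed by the 32-bit padding word intended for
 * true 32-bit ABIs. */
struct msq_generic_on_x32 {
	uint64_t msg_stime;
	uint32_t __unused1;
	uint64_t msg_rtime;
};

/* What the x86-64 kernel actually fills in (the layout defined above). */
struct msq_x86_64 {
	uint64_t msg_stime;
	uint64_t msg_rtime;
};

int main(void)
{
	printf("msg_rtime offset, generic-on-x32: %zu\n",
	       offsetof(struct msq_generic_on_x32, msg_rtime));
	printf("msg_rtime offset, x86-64/x32:     %zu\n",
	       offsetof(struct msq_x86_64, msg_rtime));
	return 0;
}

The first offset lands at 16 (because of alignment padding), the second at 8, so every field after the timestamps would be read from the wrong place under the generic layout.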
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index b5229abd1629..4922ab66fd29 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -93,8 +93,12 @@ out_data:
return NULL;
}
-static void free_apic_chip_data(struct apic_chip_data *data)
+static void free_apic_chip_data(unsigned int virq, struct apic_chip_data *data)
{
+#ifdef CONFIG_X86_IO_APIC
+ if (virq < nr_legacy_irqs())
+ legacy_irq_data[virq] = NULL;
+#endif
if (data) {
free_cpumask_var(data->domain);
free_cpumask_var(data->old_domain);
@@ -318,11 +322,7 @@ static void x86_vector_free_irqs(struct irq_domain *domain,
apic_data = irq_data->chip_data;
irq_domain_reset_irq_data(irq_data);
raw_spin_unlock_irqrestore(&vector_lock, flags);
- free_apic_chip_data(apic_data);
-#ifdef CONFIG_X86_IO_APIC
- if (virq + i < nr_legacy_irqs())
- legacy_irq_data[virq + i] = NULL;
-#endif
+ free_apic_chip_data(virq + i, apic_data);
}
}
}
@@ -363,7 +363,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
err = assign_irq_vector_policy(virq + i, node, data, info);
if (err) {
irq_data->chip_data = NULL;
- free_apic_chip_data(data);
+ free_apic_chip_data(virq + i, data);
goto error;
}
}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index baddc9ed3454..b8b0b6e78371 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -299,6 +299,15 @@ retpoline_auto:
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
}
+
+ /*
+ * Retpoline means the kernel is safe because it has no indirect
+ * branches. But firmware isn't, so use IBRS to protect that.
+ */
+ if (boot_cpu_has(X86_FEATURE_IBRS)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+ pr_info("Enabling Restricted Speculation for firmware calls\n");
+ }
}
#undef pr_fmt
@@ -325,8 +334,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return sprintf(buf, "Not affected\n");
- return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
spectre_v2_module_string());
}
#endif
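For reference, the sysfs line built in the hunk above just appends one optional ", IBPB" / ", IBRS_FW" fragment per enabled feature. A trimmed standalone sketch of that formatting (the module-string argument is omitted, the booleans stand in for the boot_cpu_has() checks, and the mode string is only an example):

#include <stdio.h>
#include <stdbool.h>

static void show_spectre_v2(const char *mode, bool ibpb, bool ibrs_fw)
{
	/* one "%s" per optional fragment, each empty when the feature is off */
	printf("%s%s%s\n", mode,
	       ibpb    ? ", IBPB"    : "",
	       ibrs_fw ? ", IBRS_FW" : "");
}

int main(void)
{
	show_spectre_v2("Mitigation: Full generic retpoline", true, true);
	show_spectre_v2("Mitigation: Full generic retpoline", false, false);
	return 0;
}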
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 6ed206bd9071..8fb1d6522f8e 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -64,7 +64,7 @@ void check_mpx_erratum(struct cpuinfo_x86 *c)
/*
* Early microcode releases for the Spectre v2 mitigation were broken.
* Information taken from;
- * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
* - https://kb.vmware.com/s/article/52345
* - Microcode revisions observed in the wild
* - Release note from 20180108 microcode release
@@ -82,7 +82,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
{ INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
{ INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
{ INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
- { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
{ INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
{ INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
{ INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
@@ -103,6 +102,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
int i;
+ /*
+ * We know that hypervisors lie to us about the microcode version, so
+ * we may as well hope that they are running the correct version.
+ */
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return false;
+
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
if (c->x86_model == spectre_bad_microcodes[i].model &&
c->x86_stepping == spectre_bad_microcodes[i].stepping)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index e1f8b856d62e..b08bfcd77e22 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -50,6 +50,7 @@
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>
+#include <asm/reboot.h>
#include "mce-internal.h"
@@ -63,6 +64,9 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
smp_load_acquire(&(p)); \
})
+/* sysfs synchronization */
+static DEFINE_MUTEX(mce_sysfs_mutex);
+
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>
@@ -1080,9 +1084,22 @@ void do_machine_check(struct pt_regs *regs, long error_code)
* on Intel.
*/
int lmce = 1;
+ int cpu = smp_processor_id();
- /* If this CPU is offline, just bail out. */
- if (cpu_is_offline(smp_processor_id())) {
+ /*
+ * Cases where we avoid rendezvous handler timeout:
+ * 1) If this CPU is offline.
+ *
+ * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
+ * skip those CPUs which remain looping in the 1st kernel - see
+ * crash_nmi_callback().
+ *
+ * Note: there still is a small window between kexec-ing and the new,
+ * kdump kernel establishing a new #MC handler where a broadcasted MCE
+ * might not get handled properly.
+ */
+ if (cpu_is_offline(cpu) ||
+ (crashing_cpu != -1 && crashing_cpu != cpu)) {
u64 mcgstatus;
mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
@@ -1705,30 +1722,35 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
return 0;
}
-static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
+/*
+ * Init basic CPU features needed for early decoding of MCEs.
+ */
+static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
{
- switch (c->x86_vendor) {
- case X86_VENDOR_INTEL:
- mce_intel_feature_init(c);
- mce_adjust_timer = cmci_intel_adjust_timer;
- break;
-
- case X86_VENDOR_AMD: {
+ if (c->x86_vendor == X86_VENDOR_AMD) {
mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR);
mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA);
- /*
- * Install proper ops for Scalable MCA enabled processors
- */
if (mce_flags.smca) {
msr_ops.ctl = smca_ctl_reg;
msr_ops.status = smca_status_reg;
msr_ops.addr = smca_addr_reg;
msr_ops.misc = smca_misc_reg;
}
- mce_amd_feature_init(c);
+ }
+}
+static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
+{
+ switch (c->x86_vendor) {
+ case X86_VENDOR_INTEL:
+ mce_intel_feature_init(c);
+ mce_adjust_timer = cmci_intel_adjust_timer;
+ break;
+
+ case X86_VENDOR_AMD: {
+ mce_amd_feature_init(c);
break;
}
@@ -1815,6 +1837,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
machine_check_vector = do_machine_check;
+ __mcheck_cpu_init_early(c);
__mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(c);
__mcheck_cpu_init_clear_banks();
@@ -2336,6 +2359,7 @@ static ssize_t set_ignore_ce(struct device *s,
if (kstrtou64(buf, 0, &new) < 0)
return -EINVAL;
+ mutex_lock(&mce_sysfs_mutex);
if (mca_cfg.ignore_ce ^ !!new) {
if (new) {
/* disable ce features */
@@ -2348,6 +2372,8 @@ static ssize_t set_ignore_ce(struct device *s,
on_each_cpu(mce_enable_ce, (void *)1, 1);
}
}
+ mutex_unlock(&mce_sysfs_mutex);
+
return size;
}
@@ -2360,6 +2386,7 @@ static ssize_t set_cmci_disabled(struct device *s,
if (kstrtou64(buf, 0, &new) < 0)
return -EINVAL;
+ mutex_lock(&mce_sysfs_mutex);
if (mca_cfg.cmci_disabled ^ !!new) {
if (new) {
/* disable cmci */
@@ -2371,6 +2398,8 @@ static ssize_t set_cmci_disabled(struct device *s,
on_each_cpu(mce_enable_ce, NULL, 1);
}
}
+ mutex_unlock(&mce_sysfs_mutex);
+
return size;
}
@@ -2378,8 +2407,19 @@ static ssize_t store_int_with_restart(struct device *s,
struct device_attribute *attr,
const char *buf, size_t size)
{
- ssize_t ret = device_store_int(s, attr, buf, size);
+ unsigned long old_check_interval = check_interval;
+ ssize_t ret = device_store_ulong(s, attr, buf, size);
+
+ if (check_interval == old_check_interval)
+ return ret;
+
+ if (check_interval < 1)
+ check_interval = 1;
+
+ mutex_lock(&mce_sysfs_mutex);
mce_restart();
+ mutex_unlock(&mce_sysfs_mutex);
+
return ret;
}
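The store_int_with_restart() change above follows a common sysfs-store pattern: accept the write, skip the expensive restart when the value did not actually change, clamp it to a sane minimum, and serialize the restart against other writers. A standalone sketch of the same pattern, with a pthread mutex in place of the kernel mutex and illustrative names:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static long check_interval = 300;           /* seconds, like the MCE poll timer */

static void restart_timers(void) { printf("restarting poll timers\n"); }

static void store_check_interval(long new_val)
{
	long old = check_interval;

	check_interval = new_val;
	if (check_interval == old)
		return;                      /* nothing changed, no restart */
	if (check_interval < 1)
		check_interval = 1;          /* clamp, as the hunk above does */

	pthread_mutex_lock(&cfg_lock);
	restart_timers();                    /* serialized, like mce_restart() */
	pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
	store_check_interval(300);           /* unchanged: no restart */
	store_check_interval(0);             /* clamped to 1, then restart */
	return 0;
}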
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 4bcd30c87531..79291d6fb301 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -474,7 +474,6 @@ static void show_saved_mc(void)
*/
static void save_mc_for_early(u8 *mc)
{
-#ifdef CONFIG_HOTPLUG_CPU
/* Synchronization during CPU hotplug. */
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
@@ -521,7 +520,6 @@ static void save_mc_for_early(u8 *mc)
out:
mutex_unlock(&x86_cpu_microcode_mutex);
-#endif
}
static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 67cd7c1b99da..9d72cf547c88 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -22,6 +22,7 @@
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
+#include <asm/nospec-branch.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@@ -200,6 +201,7 @@ ENTRY(secondary_startup_64)
/* Ensure I am executing from virtual addresses */
movq $1f, %rax
+ ANNOTATE_RETPOLINE_SAFE
jmp *%rax
1:
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index be22f5a2192e..4e3b8a587c88 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -418,6 +418,7 @@ struct legacy_pic default_legacy_pic = {
};
struct legacy_pic *legacy_pic = &default_legacy_pic;
+EXPORT_SYMBOL(legacy_pic);
static int __init i8259A_init_ops(void)
{
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index b55d07b9d530..91c48cdfe81f 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -51,6 +51,7 @@
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/kasan.h>
+#include <linux/moduleloader.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
@@ -199,6 +200,8 @@ retry:
return (opcode != 0x62 && opcode != 0x67);
case 0x70:
return 0; /* can't boost conditional jump */
+ case 0x90:
+ return opcode != 0x9a; /* can't boost call far */
case 0xc0:
/* can't boost software-interruptions */
return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
@@ -403,10 +406,20 @@ int __copy_instruction(u8 *dest, u8 *src)
return length;
}
+/* Recover page to RW mode before releasing it */
+void free_insn_page(void *page)
+{
+ set_memory_nx((unsigned long)page & PAGE_MASK, 1);
+ set_memory_rw((unsigned long)page & PAGE_MASK, 1);
+ module_memfree(page);
+}
+
static int arch_copy_kprobe(struct kprobe *p)
{
int ret;
+ set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
+
/* Copy an instruction with recovering if other optprobe modifies it.*/
ret = __copy_instruction(p->ainsn.insn, p->addr);
if (!ret)
@@ -421,6 +434,8 @@ static int arch_copy_kprobe(struct kprobe *p)
else
p->ainsn.boostable = -1;
+ set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
+
/* Check whether the instruction modifies Interrupt Flag or not */
p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index dc20da1c78f0..fa671b90c374 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -371,6 +371,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
}
buf = (u8 *)op->optinsn.insn;
+ set_memory_rw((unsigned long)buf & PAGE_MASK, 1);
/* Copy instructions into the out-of-line buffer */
ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
@@ -393,6 +394,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
(u8 *)op->kp.addr + op->optinsn.size);
+ set_memory_ro((unsigned long)buf & PAGE_MASK, 1);
+
flush_icache_range((unsigned long) buf,
(unsigned long) buf + TMPL_END_IDX +
op->optinsn.size + RELATIVEJUMP_SIZE);
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 8c1f218926d7..a5784a14f8d1 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -524,6 +524,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
goto overflow;
break;
case R_X86_64_PC32:
+ case R_X86_64_PLT32:
value -= (u64)address;
*(u32 *)location = value;
break;
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 477ae806c2fa..19977d2f97fb 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -171,19 +171,28 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_X86_64_NONE:
break;
case R_X86_64_64:
+ if (*(u64 *)loc != 0)
+ goto invalid_relocation;
*(u64 *)loc = val;
break;
case R_X86_64_32:
+ if (*(u32 *)loc != 0)
+ goto invalid_relocation;
*(u32 *)loc = val;
if (val != *(u32 *)loc)
goto overflow;
break;
case R_X86_64_32S:
+ if (*(s32 *)loc != 0)
+ goto invalid_relocation;
*(s32 *)loc = val;
if ((s64)val != *(s32 *)loc)
goto overflow;
break;
case R_X86_64_PC32:
+ case R_X86_64_PLT32:
+ if (*(u32 *)loc != 0)
+ goto invalid_relocation;
val -= (u64)loc;
*(u32 *)loc = val;
#if 0
@@ -199,6 +208,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
return 0;
+invalid_relocation:
+ pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
+ (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
+ return -ENOEXEC;
+
overflow:
pr_err("overflow in relocation type %d val %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), val);
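Both relocation hunks above fold R_X86_64_PLT32 into the R_X86_64_PC32 case: since modules and the kexec path never go through a real procedure linkage table, a PLT32 relocation against a resolved symbol reduces to the plain PC-relative computation value = S + A - P. A minimal standalone sketch of that computation (the addresses are made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Compute a 32-bit PC-relative field: value = S + A - P. */
static uint32_t pc32_value(uint64_t sym_addr, int64_t addend, uint64_t loc_addr)
{
	return (uint32_t)(sym_addr + addend - loc_addr);
}

int main(void)
{
	uint8_t call_insn[5] = { 0xe8, 0, 0, 0, 0 };   /* call rel32 */
	uint64_t insn_addr = 0x1000, target = 0x2000;

	/* the displacement field sits at insn_addr + 1; the -4 addend makes
	 * the offset relative to the end of the instruction */
	uint32_t disp = pc32_value(target, -4, insn_addr + 1);

	memcpy(&call_insn[1], &disp, sizeof(disp));
	printf("rel32 displacement: 0x%x\n", disp);
	return 0;
}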
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index ce020a69bba9..03f21dbfaa9d 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -769,10 +769,11 @@ void machine_crash_shutdown(struct pt_regs *regs)
#endif
+/* This is the CPU performing the emergency shutdown work. */
+int crashing_cpu = -1;
+
#if defined(CONFIG_SMP)
-/* This keeps a track of which one is crashing cpu. */
-static int crashing_cpu;
static nmi_shootdown_cb shootdown_callback;
static atomic_t waiting_for_crash_ipi;
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 2bbd27f89802..f9da471a7707 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -287,4 +287,25 @@ void __init setup_per_cpu_areas(void)
/* Setup cpu initialized, callin, callout masks */
setup_cpu_local_masks();
+
+#ifdef CONFIG_X86_32
+ /*
+ * Sync back kernel address range again. We already did this in
+ * setup_arch(), but percpu data also needs to be available in
+ * the smpboot asm. We can't reliably pick up percpu mappings
+ * using vmalloc_fault(), because exception dispatch needs
+ * percpu data.
+ */
+ clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ KERNEL_PGD_PTRS);
+
+ /*
+ * sync back low identity map too. It is used for example
+ * in the 32-bit EFI stub.
+ */
+ clone_pgd_range(initial_page_table,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+#endif
}
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index ca699677e288..ea217caa731c 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -33,6 +33,7 @@
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
#include <asm/kexec.h>
+#include <asm/virtext.h>
/*
* Some notes on x86 processor bugs affecting SMP operation:
@@ -162,6 +163,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
return NMI_HANDLED;
+ cpu_emergency_vmxoff();
stop_this_cpu(NULL);
return NMI_HANDLED;
@@ -174,6 +176,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
asmlinkage __visible void smp_reboot_interrupt(void)
{
ipi_entering_ack_irq();
+ cpu_emergency_vmxoff();
stop_this_cpu(NULL);
irq_exit();
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e803d72ef525..83929cc47a4b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1591,6 +1591,8 @@ static inline void mwait_play_dead(void)
void *mwait_ptr;
int i;
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return;
if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
if (!this_cpu_has(X86_FEATURE_CLFLUSH))
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 1119414ab419..1d4e7fd3e66d 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -16,6 +16,7 @@
#include <linux/uaccess.h>
#include <linux/elf.h>
+#include <asm/compat.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
@@ -100,7 +101,7 @@ out:
static void find_start_end(unsigned long flags, unsigned long *begin,
unsigned long *end)
{
- if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
+ if (!in_compat_syscall() && (flags & MAP_32BIT)) {
/* This is usually used needed to map code in small
model, so it needs to be in the first 31bit. Limit
it to that. This means we need to move the
@@ -175,7 +176,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
/* for MAP_32BIT mappings we force the legacy mmap base */
- if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
+ if (!in_compat_syscall() && (flags & MAP_32BIT))
goto bottomup;
/* requesting a specific address */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 322f433fbc76..f2142932ff0b 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -526,7 +526,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
}
NOKPROBE_SYMBOL(do_general_protection);
-/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -541,7 +540,15 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
if (poke_int3_handler(regs))
return;
+ /*
+ * Use ist_enter despite the fact that we don't use an IST stack.
+ * We can be called from a kprobe in non-CONTEXT_KERNEL kernel
+ * mode or even during context tracking state changes.
+ *
+ * This means that we can't schedule. That's okay.
+ */
ist_enter(regs);
+
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
@@ -558,17 +565,11 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
SIGTRAP) == NOTIFY_STOP)
goto exit;
- /*
- * Let others (NMI) know that the debug stack is in use
- * as we may switch to the interrupt stack.
- */
- debug_stack_usage_inc();
preempt_disable();
cond_local_irq_enable(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
cond_local_irq_disable(regs);
preempt_enable_no_resched();
- debug_stack_usage_dec();
exit:
ist_exit(regs);
}
@@ -989,19 +990,16 @@ void __init trap_init(void)
cpu_init();
/*
- * X86_TRAP_DB and X86_TRAP_BP have been set
- * in early_trap_init(). However, ITS works only after
- * cpu_init() loads TSS. See comments in early_trap_init().
+ * X86_TRAP_DB was installed in early_trap_init(). However,
+ * IST works only after cpu_init() loads TSS. See comments
+ * in early_trap_init().
*/
set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
- /* int3 can be called from all */
- set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
x86_init.irqs.trap_init();
#ifdef CONFIG_X86_64
memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
set_nmi_gate(X86_TRAP_DB, &debug);
- set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index d07a9390023e..da6a287a11e4 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -366,6 +366,8 @@ static int __init tsc_setup(char *str)
tsc_clocksource_reliable = 1;
if (!strncmp(str, "noirqtime", 9))
no_sched_irq_time = 1;
+ if (!strcmp(str, "unstable"))
+ mark_tsc_unstable("boot parameter");
return 1;
}
@@ -407,7 +409,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
hpet2 -= hpet1;
tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
do_div(tmp, 1000000);
- do_div(deltatsc, tmp);
+ deltatsc = div64_u64(deltatsc, tmp);
return (unsigned long) deltatsc;
}
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 8a1d63591399..961831bf74b1 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -719,7 +719,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
return;
check_vip:
- if (VEFLAGS & X86_EFLAGS_VIP) {
+ if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
+ (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
save_v86_state(regs, VM86_STI);
return;
}
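The vm86 fix above tightens the exit condition: a pending virtual interrupt (VIP) should only force a return to the vm86 monitor once the guest has also enabled virtual interrupts (VIF); testing VIP alone bounces out even while the guest still has interrupts masked. A standalone sketch of the check:

#include <stdio.h>
#include <stdbool.h>

#define X86_EFLAGS_VIF (1u << 19)
#define X86_EFLAGS_VIP (1u << 20)

static bool must_exit_to_monitor(unsigned int veflags)
{
	return (veflags & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
	       (X86_EFLAGS_VIP | X86_EFLAGS_VIF);
}

int main(void)
{
	printf("VIP only : %d\n", must_exit_to_monitor(X86_EFLAGS_VIP));
	printf("VIP + VIF: %d\n",
	       must_exit_to_monitor(X86_EFLAGS_VIP | X86_EFLAGS_VIF));
	return 0;
}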
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02a062b0de5d..1015f4c3b9ed 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1363,8 +1363,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
static void cancel_hv_tscdeadline(struct kvm_lapic *apic)
{
+ preempt_disable();
kvm_x86_ops->cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
+ preempt_enable();
}
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index be644afab1bb..aaa93b4b0380 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -44,6 +44,7 @@
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
+#include <asm/microcode.h>
#include <asm/nospec-branch.h>
#include <asm/virtext.h>
@@ -1878,6 +1879,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
*/
if (var->unusable)
var->db = 0;
+ /* This is symmetric with svm_set_segment() */
var->dpl = to_svm(vcpu)->vmcb->save.cpl;
break;
}
@@ -2023,18 +2025,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
s->base = var->base;
s->limit = var->limit;
s->selector = var->selector;
- if (var->unusable)
- s->attrib = 0;
- else {
- s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
- s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
- s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
- s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
- s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
- s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
- s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
- s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
- }
+ s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+ s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+ s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+ s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+ s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+ s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+ s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+ s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
/*
* This is always accurate, except if SYSRET returned to a segment
@@ -2043,7 +2041,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
* would entail passing the CPL to userspace and back.
*/
if (seg == VCPU_SREG_SS)
- svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+ /* This is symmetric with svm_get_segment() */
+ svm->vmcb->save.cpl = (var->dpl & 3);
mark_dirty(svm->vmcb, VMCB_SEG);
}
@@ -4919,7 +4918,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
* being speculatively taken.
*/
if (svm->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
asm volatile (
"push %%" _ASM_BP "; \n\t"
@@ -5028,11 +5027,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
- rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
if (svm->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
@@ -5448,6 +5447,12 @@ static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
avic_handle_ldr_update(vcpu);
}
+static void svm_setup_mce(struct kvm_vcpu *vcpu)
+{
+ /* [63:9] are reserved. */
+ vcpu->arch.mcg_cap &= 0x1ff;
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -5563,6 +5568,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.pmu_ops = &amd_pmu_ops,
.deliver_posted_interrupt = svm_deliver_avic_intr,
.update_pi_irte = svm_update_pi_irte,
+ .setup_mce = svm_setup_mce,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c51aaac953b4..b978aeccda78 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -49,6 +49,7 @@
#include <asm/kexec.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
+#include <asm/microcode.h>
#include <asm/nospec-branch.h>
#include "trace.h"
@@ -1052,6 +1053,13 @@ static inline bool is_machine_check(u32 intr_info)
(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}
+/* Undocumented: icebp/int1 */
+static inline bool is_icebp(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+ == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
static inline bool cpu_has_vmx_msr_bitmap(void)
{
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
@@ -5732,7 +5740,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= dr6 | DR6_RTM;
- if (!(dr6 & ~DR6_RESERVED)) /* icebp */
+ if (is_icebp(intr_info))
skip_emulated_instruction(vcpu);
kvm_queue_exception(vcpu, DB_VECTOR);
@@ -7916,11 +7924,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
int cr = exit_qualification & 15;
- int reg = (exit_qualification >> 8) & 15;
- unsigned long val = kvm_register_readl(vcpu, reg);
+ int reg;
+ unsigned long val;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
+ reg = (exit_qualification >> 8) & 15;
+ val = kvm_register_readl(vcpu, reg);
switch (cr) {
case 0:
if (vmcs12->cr0_guest_host_mask &
@@ -7975,6 +7985,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
* lmsw can change bits 1..3 of cr0, and only set bit 0 of
* cr0. Other attempted changes are ignored, with no exit.
*/
+ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow))
return true;
@@ -8906,7 +8917,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* being speculatively taken.
*/
if (vmx->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
vmx->__launched = vmx->loaded_vmcs->launched;
asm(
@@ -9041,11 +9052,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
- rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
if (vmx->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
@@ -10653,8 +10664,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
}
- if (nested_cpu_has_ept(vmcs12))
- vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
+ vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
if (nested_cpu_has_vid(vmcs12))
vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c9bb294f52e..0bf6ed591f16 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3070,7 +3070,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
return -EINVAL;
if (events->exception.injected &&
- (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+ (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR ||
+ is_guest_mode(vcpu)))
return -EINVAL;
/* INITs are latched while in SMM */
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 4ad7c4dd311c..6bf1898ddf49 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -26,7 +26,6 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_RETPOLINE) += retpoline.o
-OBJECT_FILES_NON_STANDARD_retpoline.o :=y
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 7e48807b2fa1..45a53dfe1859 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -55,7 +55,7 @@ ENTRY(csum_partial_copy_generic)
movq %r12, 3*8(%rsp)
movq %r14, 4*8(%rsp)
movq %r13, 5*8(%rsp)
- movq %rbp, 6*8(%rsp)
+ movq %r15, 6*8(%rsp)
movq %r8, (%rsp)
movq %r9, 1*8(%rsp)
@@ -74,7 +74,7 @@ ENTRY(csum_partial_copy_generic)
/* main loop. clear in 64 byte blocks */
/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
/* r11: temp3, rdx: temp4, r12 loopcnt */
- /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+ /* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */
.p2align 4
.Lloop:
source
@@ -89,7 +89,7 @@ ENTRY(csum_partial_copy_generic)
source
movq 32(%rdi), %r10
source
- movq 40(%rdi), %rbp
+ movq 40(%rdi), %r15
source
movq 48(%rdi), %r14
source
@@ -103,7 +103,7 @@ ENTRY(csum_partial_copy_generic)
adcq %r11, %rax
adcq %rdx, %rax
adcq %r10, %rax
- adcq %rbp, %rax
+ adcq %r15, %rax
adcq %r14, %rax
adcq %r13, %rax
@@ -121,7 +121,7 @@ ENTRY(csum_partial_copy_generic)
dest
movq %r10, 32(%rsi)
dest
- movq %rbp, 40(%rsi)
+ movq %r15, 40(%rsi)
dest
movq %r14, 48(%rsi)
dest
@@ -203,7 +203,7 @@ ENTRY(csum_partial_copy_generic)
movq 3*8(%rsp), %r12
movq 4*8(%rsp), %r14
movq 5*8(%rsp), %r13
- movq 6*8(%rsp), %rbp
+ movq 6*8(%rsp), %r15
addq $7*8, %rsp
ret
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 121f59c6ee54..0c7fe444dcdd 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -5,6 +5,7 @@
* kernel starts. This file is included in the compressed kernel and
* normally linked in the regular.
*/
+#include <asm/asm.h>
#include <asm/kaslr.h>
#include <asm/msr.h>
#include <asm/archrandom.h>
@@ -79,7 +80,7 @@ unsigned long kaslr_get_random_long(const char *purpose)
}
/* Circular multiply for better bit diffusion */
- asm("mul %3"
+ asm(_ASM_MUL "%3"
: "=a" (random), "=d" (raw)
: "a" (random), "rm" (mix_const));
random += raw;
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 480edc3a5e03..c909961e678a 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -7,7 +7,6 @@
#include <asm/alternative-asm.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
-#include <asm/bitsperlong.h>
.macro THUNK reg
.section .text.__x86.indirect_thunk
@@ -47,58 +46,3 @@ GENERATE_THUNK(r13)
GENERATE_THUNK(r14)
GENERATE_THUNK(r15)
#endif
-
-/*
- * Fill the CPU return stack buffer.
- *
- * Each entry in the RSB, if used for a speculative 'ret', contains an
- * infinite 'pause; lfence; jmp' loop to capture speculative execution.
- *
- * This is required in various cases for retpoline and IBRS-based
- * mitigations for the Spectre variant 2 vulnerability. Sometimes to
- * eliminate potentially bogus entries from the RSB, and sometimes
- * purely to ensure that it doesn't get empty, which on some CPUs would
- * allow predictions from other (unwanted!) sources to be used.
- *
- * Google experimented with loop-unrolling and this turned out to be
- * the optimal version - two calls, each with their own speculation
- * trap should their return address end up getting used, in a loop.
- */
-.macro STUFF_RSB nr:req sp:req
- mov $(\nr / 2), %_ASM_BX
- .align 16
-771:
- call 772f
-773: /* speculation trap */
- pause
- lfence
- jmp 773b
- .align 16
-772:
- call 774f
-775: /* speculation trap */
- pause
- lfence
- jmp 775b
- .align 16
-774:
- dec %_ASM_BX
- jnz 771b
- add $((BITS_PER_LONG/8) * \nr), \sp
-.endm
-
-#define RSB_FILL_LOOPS 16 /* To avoid underflow */
-
-ENTRY(__fill_rsb)
- STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
- ret
-END(__fill_rsb)
-EXPORT_SYMBOL_GPL(__fill_rsb)
-
-#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
-
-ENTRY(__clear_rsb)
- STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
- ret
-END(__clear_rsb)
-EXPORT_SYMBOL_GPL(__clear_rsb)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 74dea7f14c20..ae23c996e3a8 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -343,7 +343,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (!pmd_k)
return -1;
- if (pmd_huge(*pmd_k))
+ if (pmd_large(*pmd_k))
return 0;
pte_k = pte_offset_kernel(pmd_k, address);
@@ -463,7 +463,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
BUG();
- if (pud_huge(*pud))
+ if (pud_large(*pud))
return 0;
pmd = pmd_offset(pud, address);
@@ -474,7 +474,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
BUG();
- if (pmd_huge(*pmd))
+ if (pmd_large(*pmd))
return 0;
pte_ref = pte_offset_kernel(pmd_ref, address);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 209b9465e97a..b97ef29c940f 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -643,4 +643,52 @@ int pmd_clear_huge(pmd_t *pmd)
return 0;
}
+
+/**
+ * pud_free_pmd_page - Clear pud entry and free pmd page.
+ * @pud: Pointer to a PUD.
+ *
+ * Context: The pud range has been unmapped and the TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+int pud_free_pmd_page(pud_t *pud)
+{
+ pmd_t *pmd;
+ int i;
+
+ if (pud_none(*pud))
+ return 1;
+
+ pmd = (pmd_t *)pud_page_vaddr(*pud);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ if (!pmd_free_pte_page(&pmd[i]))
+ return 0;
+
+ pud_clear(pud);
+ free_page((unsigned long)pmd);
+
+ return 1;
+}
+
+/**
+ * pmd_free_pte_page - Clear pmd entry and free pte page.
+ * @pmd: Pointer to a PMD.
+ *
+ * Context: The pmd range has been unmapped and the TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+int pmd_free_pte_page(pmd_t *pmd)
+{
+ pte_t *pte;
+
+ if (pmd_none(*pmd))
+ return 1;
+
+ pte = (pte_t *)pmd_page_vaddr(*pmd);
+ pmd_clear(pmd);
+ free_page((unsigned long)pte);
+
+ return 1;
+}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 578973ade71b..eac92e2d171b 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -10,6 +10,7 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
@@ -29,6 +30,8 @@
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/
+atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
+
struct flush_tlb_info {
struct mm_struct *flush_mm;
unsigned long flush_start;
@@ -104,6 +107,28 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
+ u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
+
+ /*
+ * Avoid user/user BTB poisoning by flushing the branch
+ * predictor when switching between processes. This stops
+ * one process from doing Spectre-v2 attacks on another.
+ *
+ * As an optimization, flush indirect branches only when
+ * switching into processes that disable dumping. This
+ * protects high value processes like gpg, without having
+ * too high performance overhead. IBPB is *expensive*!
+ *
+ * This will not flush branches when switching into kernel
+ * threads. It will also not flush if we switch to the idle
+ * thread and back to the same process. It will flush if we
+ * switch to a different non-dumpable process.
+ */
+ if (tsk && tsk->mm &&
+ tsk->mm->context.ctx_id != last_ctx_id &&
+ get_dumpable(tsk->mm) != SUID_DUMP_USER)
+ indirect_branch_prediction_barrier();
+
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
/*
* If our current stack is in vmalloc space and isn't
@@ -118,6 +143,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
}
+ /*
+ * Record last user mm's context id, so we can avoid
+ * flushing branch buffer with IBPB if we switch back
+ * to the same user.
+ */
+ if (next != &init_mm)
+ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
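The comments in the hunk above describe two filters on the IBPB: skip it when staying on the same user mm (tracked via last_ctx_id), and only pay for it when the incoming task has disabled dumping. A standalone sketch of just that decision, with stand-in types and a boolean in place of get_dumpable():

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct mm { uint64_t ctx_id; bool dumpable; };

static bool need_ibpb(const struct mm *next, uint64_t last_user_ctx_id)
{
	if (!next)                             /* kernel thread: keeps the last mm */
		return false;
	if (next->ctx_id == last_user_ctx_id)  /* same user mm as last time */
		return false;
	return !next->dumpable;                /* only protect non-dumpable tasks */
}

int main(void)
{
	struct mm gpg = { .ctx_id = 42, .dumpable = false };

	printf("switch to gpg (new mm): %d\n", need_ibpb(&gpg, 7));
	printf("switch back to gpg    : %d\n", need_ibpb(&gpg, 42));
	return 0;
}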
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 7840331d3056..cd9764520851 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
+#include <asm/nospec-branch.h>
#include <linux/bpf.h>
int bpf_jit_enable __read_mostly;
@@ -281,7 +282,7 @@ static void emit_bpf_tail_call(u8 **pprog)
EMIT2(0x89, 0xD2); /* mov edx, edx */
EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 43 /* number of bytes to jump */
+#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
label1 = cnt;
@@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog)
*/
EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 32
+#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
EMIT2(X86_JA, OFFSET2); /* ja out */
label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
@@ -304,7 +305,7 @@ static void emit_bpf_tail_call(u8 **pprog)
* goto out;
*/
EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
-#define OFFSET3 10
+#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
EMIT2(X86_JE, OFFSET3); /* je out */
label3 = cnt;
@@ -317,7 +318,7 @@ static void emit_bpf_tail_call(u8 **pprog)
* rdi == ctx (1st arg)
* rax == prog->bpf_func + prologue_size
*/
- EMIT2(0xFF, 0xE0); /* jmp rax */
+ RETPOLINE_RAX_BPF_JIT();
/* out: */
BUILD_BUG_ON(cnt - label1 != OFFSET1);
@@ -1134,7 +1135,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* may converge on the last pass. In such case do one more
* pass to emit the final image
*/
- for (pass = 0; pass < 10 || image; pass++) {
+ for (pass = 0; pass < 20 || image; pass++) {
proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
if (proglen <= 0) {
image = NULL;
@@ -1161,6 +1162,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
}
oldproglen = proglen;
+ cond_resched();
}
if (bpf_jit_enable > 1)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 28c04123b6dd..842ca3cab6b3 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -472,7 +472,7 @@ static int nmi_setup(void)
goto fail;
for_each_possible_cpu(cpu) {
- if (!cpu)
+ if (!IS_ENABLED(CONFIG_SMP) || !cpu)
continue;
memcpy(per_cpu(cpu_msrs, cpu).counters,
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 274dfc481849..a0e85f2aff7d 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -832,9 +832,11 @@ static void __init kexec_enter_virtual_mode(void)
/*
* We don't do virtual mode, since we don't do runtime services, on
- * non-native EFI
+ * non-native EFI. With efi=old_map, we don't do runtime services in
+ * the kexec kernel because in the initial boot something else might
+ * have been mapped at these virtual addresses.
*/
- if (!efi_is_native()) {
+ if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
efi_memmap_unmap();
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 7850128f0026..834783bc6752 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -79,7 +79,7 @@ static void intel_mid_power_off(void)
static void intel_mid_reboot(void)
{
- intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
}
static unsigned long __init intel_mid_calibrate_tsc(void)
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 73eb7fd4aec4..5b6c8486a0be 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -769,9 +769,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
break;
case R_X86_64_PC32:
+ case R_X86_64_PLT32:
/*
* PC relative relocations don't need to be adjusted unless
* referencing a percpu symbol.
+ *
+ * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32.
*/
if (is_percpu_sym(sym, symname))
add_reloc(&relocs32neg, offset);
diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
index 1518d2805ae8..27361cbb7ca9 100644
--- a/arch/x86/um/stub_segv.c
+++ b/arch/x86/um/stub_segv.c
@@ -6,11 +6,12 @@
#include <sysdep/stub.h>
#include <sysdep/faultinfo.h>
#include <sysdep/mcontext.h>
+#include <sys/ucontext.h>
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig, siginfo_t *info, void *p)
{
- struct ucontext *uc = p;
+ ucontext_t *uc = p;
GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
&uc->uc_mcontext);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 137afbbd0590..a11540e51f62 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -299,35 +299,46 @@ static void __init xen_filter_cpu_maps(void)
}
-static void __init xen_smp_prepare_boot_cpu(void)
+static void __init xen_pv_smp_prepare_boot_cpu(void)
{
BUG_ON(smp_processor_id() != 0);
native_smp_prepare_boot_cpu();
- if (xen_pv_domain()) {
- if (!xen_feature(XENFEAT_writable_page_tables))
- /* We've switched to the "real" per-cpu gdt, so make
- * sure the old memory can be recycled. */
- make_lowmem_page_readwrite(xen_initial_gdt);
+ if (!xen_feature(XENFEAT_writable_page_tables))
+ /* We've switched to the "real" per-cpu gdt, so make
+ * sure the old memory can be recycled. */
+ make_lowmem_page_readwrite(xen_initial_gdt);
#ifdef CONFIG_X86_32
- /*
- * Xen starts us with XEN_FLAT_RING1_DS, but linux code
- * expects __USER_DS
- */
- loadsegment(ds, __USER_DS);
- loadsegment(es, __USER_DS);
+ /*
+ * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+ * expects __USER_DS
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
#endif
- xen_filter_cpu_maps();
- xen_setup_vcpu_info_placement();
- }
+ xen_filter_cpu_maps();
+ xen_setup_vcpu_info_placement();
+
+ /*
+ * The alternative logic (which patches the unlock/lock) runs before
+ * the smp bootup code is activated. Hence we need to set this up before
+ * the core kernel is patched. Otherwise we will have only
+ * modules patched but not core code.
+ */
+ xen_init_spinlocks();
+}
+
+static void __init xen_hvm_smp_prepare_boot_cpu(void)
+{
+ BUG_ON(smp_processor_id() != 0);
+ native_smp_prepare_boot_cpu();
/*
* Setup vcpu_info for boot CPU.
*/
- if (xen_hvm_domain())
- xen_vcpu_setup(0);
+ xen_vcpu_setup(0);
/*
* The alternative logic (which patches the unlock/lock) runs before
@@ -733,7 +744,7 @@ static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
}
static const struct smp_ops xen_smp_ops __initconst = {
- .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
+ .smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
.smp_prepare_cpus = xen_smp_prepare_cpus,
.smp_cpus_done = xen_smp_cpus_done,
@@ -772,5 +783,5 @@ void __init xen_hvm_smp_init(void)
smp_ops.cpu_die = xen_cpu_die;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
- smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
+ smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
}
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 7f664c416faf..4ecd0de08557 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,11 +1,14 @@
#include <linux/types.h>
#include <linux/tick.h>
+#include <linux/percpu-defs.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/events.h>
+#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/fixmap.h>
@@ -68,6 +71,8 @@ static void xen_pv_post_suspend(int suspend_cancelled)
xen_mm_unpin_all();
}
+static DEFINE_PER_CPU(u64, spec_ctrl);
+
void xen_arch_pre_suspend(void)
{
if (xen_pv_domain())
@@ -84,6 +89,9 @@ void xen_arch_post_suspend(int cancelled)
static void xen_vcpu_notify_restore(void *data)
{
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+ wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+
/* Boot processor notified via generic timekeeping_resume() */
if (smp_processor_id() == 0)
return;
@@ -93,7 +101,15 @@ static void xen_vcpu_notify_restore(void *data)
static void xen_vcpu_notify_suspend(void *data)
{
+ u64 tmp;
+
tick_suspend_local();
+
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
+ rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+ this_cpu_write(spec_ctrl, tmp);
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ }
}
void xen_arch_resume(void)
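The xen/suspend.c hunk above parks each vCPU's SPEC_CTRL value before the domain suspends and writes it back on restore. A standalone sketch of that save/clear/restore pattern, with an array indexed by CPU standing in for the per-cpu variable and a plain array element mocking the MSR (the real code uses rdmsrl()/wrmsrl() as shown above):

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4
static uint64_t fake_msr[NR_CPUS];         /* stands in for MSR_IA32_SPEC_CTRL */
static uint64_t saved_spec_ctrl[NR_CPUS];  /* stands in for the per-cpu slot */

static void notify_suspend(int cpu)
{
	saved_spec_ctrl[cpu] = fake_msr[cpu];  /* rdmsrl(...) */
	fake_msr[cpu] = 0;                     /* wrmsrl(..., 0) */
}

static void notify_restore(int cpu)
{
	fake_msr[cpu] = saved_spec_ctrl[cpu];  /* wrmsrl(..., saved value) */
}

int main(void)
{
	fake_msr[1] = 1;                       /* IBRS enabled on vCPU 1 */
	notify_suspend(1);
	printf("during suspend: %llu\n", (unsigned long long)fake_msr[1]);
	notify_restore(1);
	printf("after resume  : %llu\n", (unsigned long long)fake_msr[1]);
	return 0;
}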
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 72bfc1cbc2b5..5bfbc1c401d4 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -44,18 +44,10 @@
: "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
: "memory")
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
#if !XCHAL_HAVE_S32C1I
return -ENOSYS;
@@ -89,19 +81,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (ret)
- return ret;
+ if (!ret)
+ *oval = oldval;
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
- case FUTEX_OP_CMP_NE: return (oldval != cmparg);
- case FUTEX_OP_CMP_LT: return (oldval < cmparg);
- case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
- case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
- case FUTEX_OP_CMP_GT: return (oldval > cmparg);
- }
-
- return -ENOSYS;
+ return ret;
}
static inline int
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 80e4cfb2471a..d5abdf8878fe 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -77,19 +77,75 @@ void __init zones_init(void)
free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
}
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+ for (; pfn < end; pfn++)
+ free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+ unsigned long max_low = max_low_pfn;
+ struct memblock_region *mem, *res;
+
+ reset_all_zones_managed_pages();
+ /* set highmem page free */
+ for_each_memblock(memory, mem) {
+ unsigned long start = memblock_region_memory_base_pfn(mem);
+ unsigned long end = memblock_region_memory_end_pfn(mem);
+
+ /* Ignore complete lowmem entries */
+ if (end <= max_low)
+ continue;
+
+ if (memblock_is_nomap(mem))
+ continue;
+
+ /* Truncate partial highmem entries */
+ if (start < max_low)
+ start = max_low;
+
+ /* Find and exclude any reserved regions */
+ for_each_memblock(reserved, res) {
+ unsigned long res_start, res_end;
+
+ res_start = memblock_region_reserved_base_pfn(res);
+ res_end = memblock_region_reserved_end_pfn(res);
+
+ if (res_end < start)
+ continue;
+ if (res_start < start)
+ res_start = start;
+ if (res_start > end)
+ res_start = end;
+ if (res_end > end)
+ res_end = end;
+ if (res_start != start)
+ free_area_high(start, res_start);
+ start = res_end;
+ if (start == end)
+ break;
+ }
+
+ /* And now free anything which remains */
+ if (start < end)
+ free_area_high(start, end);
+ }
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
/*
* Initialize memory pages.
*/
void __init mem_init(void)
{
-#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
- reset_all_zones_managed_pages();
- for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
- free_highmem_page(pfn_to_page(tmp));
-#endif
+ free_highpages();
max_mapnr = max_pfn - ARCH_PFN_OFFSET;
high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 63f72f00c72e..80dedde0de73 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
if (!bio_is_rw(bio))
return false;
+ if (!bio_sectors(bio))
+ return false;
+
/* Already protected? */
if (bio_integrity(bio))
return false;
diff --git a/block/bio.c b/block/bio.c
index 07f287b14cff..4f93345c6a82 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -42,9 +42,9 @@
* break badly! cannot be bigger than what you can fit into an
* unsigned short
*/
-#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
+#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
- BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
+ BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b08ccbb9393a..6cd839c1f507 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1078,10 +1078,8 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();
- if (IS_ERR(blkg)) {
- blkg_free(new_blkg);
+ if (IS_ERR(blkg))
return PTR_ERR(blkg);
- }
q->root_blkg = blkg;
q->root_rl.blkg = blkg;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 82500641f37b..e0a804ab5420 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1285,13 +1285,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
+ blk_queue_split(q, &bio, q->bio_split);
+
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio_io_error(bio);
return BLK_QC_T_NONE;
}
- blk_queue_split(q, &bio, q->bio_split);
-
if (!is_flush_fua && !blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
return BLK_QC_T_NONE;
@@ -1612,7 +1612,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
{
unsigned flush_start_tag = set->queue_depth;
- blk_mq_tag_idle(hctx);
+ if (blk_mq_hw_queue_mapped(hctx))
+ blk_mq_tag_idle(hctx);
if (set->ops->exit_request)
set->ops->exit_request(set->driver_data,
@@ -1927,6 +1928,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
blk_mq_sysfs_unregister(q);
+
+ /* protect against switching io scheduler */
+ mutex_lock(&q->sysfs_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int node;
@@ -1976,6 +1980,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
}
}
q->nr_hw_queues = i;
+ mutex_unlock(&q->sysfs_lock);
blk_mq_sysfs_register(q);
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a3ea8260c94c..3a4c9a3c1427 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -499,6 +499,17 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
unsigned long expires)
{
+ unsigned long max_expire = jiffies + 8 * throtl_slice;
+
+ /*
+ * Since we are adjusting the throttle limit dynamically, the sleep
+ * time calculated according to the previous limit might be invalid. It's
+ * possible the cgroup's sleep time is very long and no other cgroup has
+ * IO running to notify it of the limit change. Make sure the cgroup
+ * doesn't sleep too long and miss the notification.
+ */
+ if (time_after(expires, max_expire))
+ expires = max_expire;
mod_timer(&sq->pending_timer, expires);
throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
expires - jiffies, jiffies);
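The clamp added above simply bounds the timer expiry so a group throttled under an old, much stricter limit re-evaluates within a fixed window. A standalone sketch of the arithmetic, using a signed subtraction in place of the kernel's time_after() (the factor 8 matches the hunk; the slice length is arbitrary here):

#include <stdio.h>

/* Bound an expiry to at most now + 8 * slice; the signed subtraction
 * mimics time_after() so counter wraparound is still handled. */
static unsigned long clamp_expiry(unsigned long expires, unsigned long now,
				  unsigned long slice)
{
	unsigned long max_expire = now + 8 * slice;

	if ((long)(expires - max_expire) > 0)
		return max_expire;
	return expires;
}

int main(void)
{
	printf("%lu\n", clamp_expiry(1000000, 1000, 10)); /* clamped to 1080 */
	printf("%lu\n", clamp_expiry(1050, 1000, 10));    /* kept as 1050 */
	return 0;
}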
diff --git a/block/partition-generic.c b/block/partition-generic.c
index a2437c006640..298c05f8b5e3 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -321,8 +321,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
if (info) {
struct partition_meta_info *pinfo = alloc_part_info(disk);
- if (!pinfo)
+ if (!pinfo) {
+ err = -ENOMEM;
goto out_free_stats;
+ }
memcpy(pinfo, info, sizeof(*info));
p->info = pinfo;
}
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 5610cd537da7..7d8d50c11ce7 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -300,7 +300,9 @@ static void parse_bsd(struct parsed_partitions *state,
continue;
bsd_start = le32_to_cpu(p->p_offset);
bsd_size = le32_to_cpu(p->p_size);
- if (memcmp(flavour, "bsd\0", 4) == 0)
+ /* FreeBSD has relative offset if C partition offset is zero */
+ if (memcmp(flavour, "bsd\0", 4) == 0 &&
+ le32_to_cpu(l->d_partitions[2].p_offset) == 0)
bsd_start += offset;
if (offset == bsd_start && size == bsd_size)
/* full parent partition, we have it already */
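As the added comment above notes, FreeBSD disklabels store partition offsets relative to the slice when the raw 'c' partition offset is zero, and absolute otherwise, so the parent offset must only be added in the relative case. A standalone sketch of that offset handling (illustrative values):

#include <stdio.h>
#include <stdint.h>

static uint32_t bsd_abs_start(uint32_t parent_offset, uint32_t c_part_offset,
			      uint32_t p_offset)
{
	if (c_part_offset == 0)               /* relative label */
		return parent_offset + p_offset;
	return p_offset;                      /* already absolute */
}

int main(void)
{
	printf("relative label: %u\n", bsd_abs_start(2048, 0, 16));
	printf("absolute label: %u\n", bsd_abs_start(2048, 2048, 2064));
	return 0;
}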
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index ca50eeb13097..b5953f1d1a18 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -157,16 +157,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
void *private;
int err;
- /* If caller uses non-allowed flag, return error. */
- if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
- return -EINVAL;
-
if (sock->state == SS_CONNECTED)
return -EINVAL;
if (addr_len != sizeof(*sa))
return -EINVAL;
+ /* If caller uses non-allowed flag, return error. */
+ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+ return -EINVAL;
+
sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
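The af_alg reordering above makes sure the address length is validated before any field of the incoming sockaddr is read, so a short buffer is never dereferenced past its end. A standalone sketch of the same ordering, with a hypothetical structure and flag layout rather than the real sockaddr_alg:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>

struct alg_addr { uint32_t feat; uint32_t mask; char type[14]; char name[64]; };

static int bind_check(const void *addr, size_t addr_len, uint32_t allowed)
{
	struct alg_addr sa;

	if (addr_len != sizeof(sa))           /* check the length first ... */
		return -EINVAL;
	memcpy(&sa, addr, sizeof(sa));
	if ((sa.feat & ~allowed) || (sa.mask & ~allowed))
		return -EINVAL;               /* ... then look at the flags */
	return 0;
}

int main(void)
{
	char short_buf[4] = { 0 };

	printf("short addr rejected: %d\n",
	       bind_check(short_buf, sizeof(short_buf), 0));
	return 0;
}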
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 14402ef6d826..90d73a22f129 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -91,13 +91,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
if (nbytes && walk->offset & alignmask && !err) {
walk->offset = ALIGN(walk->offset, alignmask + 1);
- walk->data += walk->offset;
-
nbytes = min(nbytes,
((unsigned int)(PAGE_SIZE)) - walk->offset);
walk->entrylen -= nbytes;
- return nbytes;
+ if (nbytes) {
+ walk->data += walk->offset;
+ return nbytes;
+ }
}
if (walk->flags & CRYPTO_ALG_ASYNC)
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 5a37962d2199..df1bde273bba 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -261,7 +261,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
sinfo->index);
return 0;
}
- ret = public_key_verify_signature(p->pub, p->sig);
+ ret = public_key_verify_signature(p->pub, x509->sig);
if (ret < 0)
return ret;
x509->signer = p;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 4955eb66e361..8525fe474abd 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -93,9 +93,11 @@ int public_key_verify_signature(const struct public_key *pkey,
BUG_ON(!pkey);
BUG_ON(!sig);
- BUG_ON(!sig->digest);
BUG_ON(!sig->s);
+ if (!sig->digest)
+ return -ENOPKG;
+
alg_name = sig->pkey_algo;
if (strcmp(sig->pkey_algo, "rsa") == 0) {
/* The data wangled by the RSA algorithm is typically padded
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index 19d1afb9890f..09b1374dc619 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -66,8 +66,9 @@ __setup("ca_keys=", ca_keys_setup);
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a
* matching parent certificate in the trusted list, -EKEYREJECTED if the
- * signature check fails or the key is blacklisted and some other error if
- * there is a matching certificate but the signature check cannot be performed.
+ * signature check fails or the key is blacklisted, -ENOPKG if the signature
+ * uses unsupported crypto, or some other error if there is a matching
+ * certificate but the signature check cannot be performed.
*/
int restrict_link_by_signature(struct key *trust_keyring,
const struct key_type *type,
@@ -86,6 +87,8 @@ int restrict_link_by_signature(struct key *trust_keyring,
return -EOPNOTSUPP;
sig = payload->data[asym_auth];
+ if (!sig)
+ return -ENOPKG;
if (!sig->auth_ids[0] && !sig->auth_ids[1])
return -ENOKEY;
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 029f7051f2be..ce2df8c9c583 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -102,6 +102,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
}
}
+ ret = -ENOMEM;
cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
if (!cert->pub->key)
goto error_decode;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index f83de99d7d71..56bd612927ab 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
dma_addr_t dma_dest[2];
int src_off = 0;
- if (submit->flags & ASYNC_TX_FENCE)
- dma_flags |= DMA_PREP_FENCE;
-
while (src_cnt > 0) {
submit->flags = flags_orig;
pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
@@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan,
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
/* Drivers force forward progress in case they can not provide
* a descriptor
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 942ddff68408..4bb5f93c94cd 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1134,8 +1134,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
if (!drbg)
return;
kzfree(drbg->Vbuf);
+ drbg->Vbuf = NULL;
drbg->V = NULL;
kzfree(drbg->Cbuf);
+ drbg->Cbuf = NULL;
drbg->C = NULL;
kzfree(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index c5557d070954..667dc5c86fef 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -87,8 +87,8 @@ MODULE_PARM_DESC(report_key_events,
static bool device_id_scheme = false;
module_param(device_id_scheme, bool, 0444);
-static bool only_lcd = false;
-module_param(only_lcd, bool, 0444);
+static int only_lcd = -1;
+module_param(only_lcd, int, 0444);
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
@@ -2069,6 +2069,25 @@ static int __init intel_opregion_present(void)
return opregion;
}
+static bool dmi_is_desktop(void)
+{
+ const char *chassis_type;
+
+ chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+ if (!chassis_type)
+ return false;
+
+ if (!strcmp(chassis_type, "3") || /* 3: Desktop */
+ !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
+ !strcmp(chassis_type, "5") || /* 5: Pizza Box */
+ !strcmp(chassis_type, "6") || /* 6: Mini Tower */
+ !strcmp(chassis_type, "7") || /* 7: Tower */
+ !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
+ return true;
+
+ return false;
+}
+
int acpi_video_register(void)
{
int ret = 0;
@@ -2082,6 +2101,20 @@ int acpi_video_register(void)
goto leave;
}
+ /*
+ * We're seeing a lot of bogus backlight interfaces on newer machines
+ * without a LCD such as desktops, servers and HDMI sticks. Checking
+ * the lcd flag fixes this, so enable this on any machines which are
+ * win8 ready (where we also prefer the native backlight driver, so
+ * normally the acpi_video code should not register there anyways).
+ */
+ if (only_lcd == -1) {
+ if (dmi_is_desktop() && acpi_osi_is_win8())
+ only_lcd = true;
+ else
+ only_lcd = false;
+ }
+
dmi_check_system(video_dmi_table);
ret = acpi_bus_register_driver(&acpi_video_bus);
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 13caebd679f5..ce8fc680785b 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void)
res.start = gas->address;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
res.flags = IORESOURCE_MEM;
- res.end = res.start + ALIGN(gas->access_width, 4);
+ res.end = res.start + ALIGN(gas->access_width, 4) - 1;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
res.flags = IORESOURCE_IO;
- res.end = res.start + gas->access_width;
+ res.end = res.start + gas->access_width - 1;
} else {
pr_warn("Unsupported address space: %u\n",
gas->space_id);
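The acpi_watchdog fix above accounts for struct resource ranges being inclusive: the end address is start + length - 1, not start + length. A small standalone C sketch of that off-by-one (ALIGN is a local macro mirroring the kernel's, the values are made up):

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)	(((x) + ((uint64_t)(a) - 1)) & ~((uint64_t)(a) - 1))

/* Inclusive end of a region starting at 'start' and spanning 'len' bytes. */
static uint64_t region_end(uint64_t start, uint64_t len)
{
	return start + len - 1;
}

int main(void)
{
	uint64_t gas_address = 0x1000, access_width = 3;

	/* Memory space: width rounded up to 4 bytes, end is inclusive. */
	printf("mem end = 0x%llx\n",
	       (unsigned long long)region_end(gas_address, ALIGN(access_width, 4)));
	/* I/O space: raw width, still inclusive. */
	printf("io end  = 0x%llx\n",
	       (unsigned long long)region_end(gas_address, access_width));
	return 0;
}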
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 9179e9abe3db..35a2d9ea0654 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
ACPI_FUNCTION_TRACE(acpi_enable_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
@@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
ACPI_FUNCTION_TRACE(acpi_disable_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
@@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event)
ACPI_FUNCTION_TRACE(acpi_clear_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index db0e90342e82..ac2e8dfdf74e 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -121,6 +121,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
(u32)(aml_offset +
sizeof(struct acpi_table_header)));
+ ACPI_ERROR((AE_INFO,
+ "Aborting disassembly, AML byte code is corrupt"));
+
/* Dump the context surrounding the invalid opcode */
acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
@@ -129,6 +132,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
sizeof(struct acpi_table_header) -
16));
acpi_os_printf(" */\n");
+
+ /*
+ * Just abort the disassembly, cannot continue because the
+ * parser is essentially lost. The disassembler can then
+ * randomly fail because an ill-constructed parse tree
+ * can result.
+ */
+ return_ACPI_STATUS(AE_AML_BAD_OPCODE);
#endif
}
@@ -293,6 +304,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
if (status == AE_CTRL_PARSE_CONTINUE) {
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
}
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/* Create Op structure and append to parent's argument list */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index c3bcb7f5986e..307b3e28f34c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1518,7 +1518,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
}
acpi_handle_info(ec->handle,
- "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+ "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
return ret;
}
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c7dd7af789e..dd70d6c2bca0 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
return -ENOMEM;
}
- if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
+ if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
goto error;
if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
&first_ec->global_lock))
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 08b3ca0ead69..b012e94b7d9f 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -158,7 +158,7 @@ static inline void acpi_early_processor_osc(void) {}
-------------------------------------------------------------------------- */
struct acpi_ec {
acpi_handle handle;
- unsigned long gpe;
+ u32 gpe;
unsigned long command_addr;
unsigned long data_addr;
bool global_lock;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b1815b20a99c..3874eec972cd 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -967,8 +967,11 @@ static ssize_t scrub_show(struct device *dev,
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ mutex_lock(&acpi_desc->init_mutex);
rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
- (work_busy(&acpi_desc->work)) ? "+\n" : "\n");
+ work_busy(&acpi_desc->work)
+ && !acpi_desc->cancel ? "+\n" : "\n");
+ mutex_unlock(&acpi_desc->init_mutex);
}
device_unlock(dev);
return rc;
@@ -2547,15 +2550,21 @@ static void acpi_nfit_scrub(struct work_struct *work)
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_spa *nfit_spa;
- int rc;
- list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
- if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
- /* BLK regions don't need to wait for ars results */
- rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
- if (rc)
- return rc;
- }
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ int rc, type = nfit_spa_type(nfit_spa->spa);
+
+ /* PMEM and VMEM will be registered by the ARS workqueue */
+ if (type == NFIT_SPA_PM || type == NFIT_SPA_VOLATILE)
+ continue;
+ /* BLK apertures belong to BLK region registration below */
+ if (type == NFIT_SPA_BDW)
+ continue;
+ /* BLK regions don't need to wait for ARS results */
+ rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
+ if (rc)
+ return rc;
+ }
queue_work(nfit_wq, &acpi_desc->work);
return 0;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index ce3a7a16f03f..17b518cb787c 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
*/
int acpi_map_pxm_to_online_node(int pxm)
{
- int node, n, dist, min_dist;
+ int node, min_node;
node = acpi_map_pxm_to_node(pxm);
if (node == NUMA_NO_NODE)
node = 0;
+ min_node = node;
if (!node_online(node)) {
- min_dist = INT_MAX;
+ int min_dist = INT_MAX, dist, n;
+
for_each_online_node(n) {
dist = node_distance(node, n);
if (dist < min_dist) {
min_dist = dist;
- node = n;
+ min_node = n;
}
}
}
- return node;
+ return min_node;
}
EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
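The numa.c change above keeps the requested node fixed while searching and records the winner in min_node, so node_distance() is always measured from the original node rather than a moving target. A userspace sketch of the corrected nearest-online-node search (the online map and distance table are stand-ins for node_online()/node_distance()):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 4

static const bool online[NR_NODES] = { false, true, false, true };
static const int  dist[NR_NODES][NR_NODES] = {
	{ 10, 20, 30, 40 },
	{ 20, 10, 20, 30 },
	{ 30, 20, 10, 20 },
	{ 40, 30, 20, 10 },
};

/* Pick the online node closest to 'node'; never mutate 'node' mid-search. */
static int nearest_online_node(int node)
{
	int min_node = node;

	if (!online[node]) {
		int min_dist = INT_MAX, n;

		for (n = 0; n < NR_NODES; n++) {
			if (!online[n])
				continue;
			if (dist[node][n] < min_dist) {
				min_dist = dist[node][n];
				min_node = n;	/* track the winner separately */
			}
		}
	}
	return min_node;
}

int main(void)
{
	printf("%d\n", nearest_online_node(0));	/* 1: closest online node to 0 */
	return 0;
}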
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index e6e991ac20f3..0c6874e6a9b6 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -28,97 +28,97 @@ static struct pmic_table power_table[] = {
.address = 0x00,
.reg = 0x13,
.bit = 0x05,
- },
+ }, /* ALD1 */
{
.address = 0x04,
.reg = 0x13,
.bit = 0x06,
- },
+ }, /* ALD2 */
{
.address = 0x08,
.reg = 0x13,
.bit = 0x07,
- },
+ }, /* ALD3 */
{
.address = 0x0c,
.reg = 0x12,
.bit = 0x03,
- },
+ }, /* DLD1 */
{
.address = 0x10,
.reg = 0x12,
.bit = 0x04,
- },
+ }, /* DLD2 */
{
.address = 0x14,
.reg = 0x12,
.bit = 0x05,
- },
+ }, /* DLD3 */
{
.address = 0x18,
.reg = 0x12,
.bit = 0x06,
- },
+ }, /* DLD4 */
{
.address = 0x1c,
.reg = 0x12,
.bit = 0x00,
- },
+ }, /* ELD1 */
{
.address = 0x20,
.reg = 0x12,
.bit = 0x01,
- },
+ }, /* ELD2 */
{
.address = 0x24,
.reg = 0x12,
.bit = 0x02,
- },
+ }, /* ELD3 */
{
.address = 0x28,
.reg = 0x13,
.bit = 0x02,
- },
+ }, /* FLD1 */
{
.address = 0x2c,
.reg = 0x13,
.bit = 0x03,
- },
+ }, /* FLD2 */
{
.address = 0x30,
.reg = 0x13,
.bit = 0x04,
- },
+ }, /* FLD3 */
{
- .address = 0x38,
+ .address = 0x34,
.reg = 0x10,
.bit = 0x03,
- },
+ }, /* BUC1 */
{
- .address = 0x3c,
+ .address = 0x38,
.reg = 0x10,
.bit = 0x06,
- },
+ }, /* BUC2 */
{
- .address = 0x40,
+ .address = 0x3c,
.reg = 0x10,
.bit = 0x05,
- },
+ }, /* BUC3 */
{
- .address = 0x44,
+ .address = 0x40,
.reg = 0x10,
.bit = 0x04,
- },
+ }, /* BUC4 */
{
- .address = 0x48,
+ .address = 0x44,
.reg = 0x10,
.bit = 0x01,
- },
+ }, /* BUC5 */
{
- .address = 0x4c,
+ .address = 0x48,
.reg = 0x10,
.bit = 0x00
- },
+ }, /* BUC6 */
};
/* TMP0 - TMP5 are the same, all from GPADC */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 1c2b846c5776..3a6c9b741b23 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -864,6 +864,16 @@ void acpi_resume_power_resources(void)
mutex_unlock(&resource->resource_lock);
}
+
+ mutex_unlock(&power_resource_list_lock);
+}
+
+void acpi_turn_off_unused_power_resources(void)
+{
+ struct acpi_power_resource *resource;
+
+ mutex_lock(&power_resource_list_lock);
+
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
int result, state;
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 9d5f0c7ed3f7..8697a82bd465 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -251,6 +251,9 @@ static int __acpi_processor_start(struct acpi_device *device)
if (ACPI_SUCCESS(status))
return 0;
+ result = -ENODEV;
+ acpi_pss_perf_exit(pr, device);
+
err_power_exit:
acpi_processor_power_exit(pr);
return result;
@@ -259,11 +262,16 @@ err_power_exit:
static int acpi_processor_start(struct device *dev)
{
struct acpi_device *device = ACPI_COMPANION(dev);
+ int ret;
if (!device)
return -ENODEV;
- return __acpi_processor_start(device);
+ /* Protect against concurrent CPU hotplug operations */
+ get_online_cpus();
+ ret = __acpi_processor_start(device);
+ put_online_cpus();
+ return ret;
}
static int acpi_processor_stop(struct device *dev)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index d51ca1c05619..207e9bbb9490 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg {
#define THROTTLING_POSTCHANGE (2)
static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr,
- int state, bool force);
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force, bool direct);
static int acpi_processor_update_tsd_coord(void)
{
@@ -891,7 +891,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Invalid throttling state, reset\n"));
state = 0;
- ret = acpi_processor_set_throttling(pr, state, true);
+ ret = __acpi_processor_set_throttling(pr, state, true,
+ true);
if (ret)
return ret;
}
@@ -901,36 +902,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
return 0;
}
-static int acpi_processor_get_throttling(struct acpi_processor *pr)
+static long __acpi_processor_get_throttling(void *data)
{
- cpumask_var_t saved_mask;
- int ret;
+ struct acpi_processor *pr = data;
+
+ return pr->throttling.acpi_processor_get_throttling(pr);
+}
+static int acpi_processor_get_throttling(struct acpi_processor *pr)
+{
if (!pr)
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
/*
- * Migrate task to the cpu pointed by pr.
+ * This is either called from the CPU hotplug callback of
+ * processor_driver or via the ACPI probe function. In the latter
+ * case the CPU is not guaranteed to be online. Both call sites are
+ * protected against CPU hotplug.
*/
- cpumask_copy(saved_mask, &current->cpus_allowed);
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
- /* Can't migrate to the target pr->id CPU. Exit */
- free_cpumask_var(saved_mask);
+ if (!cpu_online(pr->id))
return -ENODEV;
- }
- ret = pr->throttling.acpi_processor_get_throttling(pr);
- /* restore the previous state */
- set_cpus_allowed_ptr(current, saved_mask);
- free_cpumask_var(saved_mask);
- return ret;
+ return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
}
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -1080,8 +1076,15 @@ static long acpi_processor_throttling_fn(void *data)
arg->target_state, arg->force);
}
-int acpi_processor_set_throttling(struct acpi_processor *pr,
- int state, bool force)
+static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
+{
+ if (direct)
+ return fn(arg);
+ return work_on_cpu(cpu, fn, arg);
+}
+
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force, bool direct)
{
int ret = 0;
unsigned int i;
@@ -1130,7 +1133,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
arg.pr = pr;
arg.target_state = state;
arg.force = force;
- ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
+ ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
+ direct);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
@@ -1163,8 +1167,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
arg.pr = match_pr;
arg.target_state = state;
arg.force = force;
- ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
- &arg);
+ ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
+ &arg, direct);
}
}
/*
@@ -1182,6 +1186,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
return ret;
}
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
+ bool force)
+{
+ return __acpi_processor_set_throttling(pr, state, force, false);
+}
+
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
int result = 0;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a4327af676fe..097d630ab886 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -474,6 +474,7 @@ static void acpi_pm_start(u32 acpi_state)
*/
static void acpi_pm_end(void)
{
+ acpi_turn_off_unused_power_resources();
acpi_scan_lock_release();
/*
* This is necessary in case acpi_pm_finish() is not called during a
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index a9cc34e663f9..a82ff74faf7a 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -6,6 +6,7 @@ extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
extern void acpi_resume_power_resources(void);
+extern void acpi_turn_off_unused_power_resources(void);
static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
{
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 02ded25c82e4..cdc47375178e 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -214,6 +214,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
+ .callback = video_detect_force_video,
+ .ident = "SAMSUNG 670Z5E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
.callback = video_detect_force_video,
.ident = "SAMSUNG 730U3E/740U3E",
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index a56fa2a1e9aa..93888ccb4e26 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev,
struct device_attribute *attr, char *buf)
{
struct amba_device *dev = to_amba_device(_dev);
+ ssize_t len;
- if (!dev->driver_override)
- return 0;
-
- return sprintf(buf, "%s\n", dev->driver_override);
+ device_lock(_dev);
+ len = sprintf(buf, "%s\n", dev->driver_override);
+ device_unlock(_dev);
+ return len;
}
static ssize_t driver_override_store(struct device *_dev,
@@ -81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev,
const char *buf, size_t count)
{
struct amba_device *dev = to_amba_device(_dev);
- char *driver_override, *old = dev->driver_override, *cp;
+ char *driver_override, *old, *cp;
- if (count > PATH_MAX)
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev,
if (cp)
*cp = '\0';
+ device_lock(_dev);
+ old = dev->driver_override;
if (strlen(driver_override)) {
dev->driver_override = driver_override;
} else {
kfree(driver_override);
dev->driver_override = NULL;
}
+ device_unlock(_dev);
kfree(old);
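The amba driver_override change above reads and swaps the override string only under device_lock(), and frees the old string outside the lock. A userspace analogue with a pthread mutex (struct and function names are invented for illustration):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_dev {
	pthread_mutex_t lock;
	char *driver_override;
};

/* Replace the override string atomically with respect to readers. */
static int set_override(struct fake_dev *dev, const char *buf)
{
	char *new = (buf && *buf) ? strdup(buf) : NULL;
	char *old;

	if (buf && *buf && !new)
		return -1;

	pthread_mutex_lock(&dev->lock);
	old = dev->driver_override;
	dev->driver_override = new;
	pthread_mutex_unlock(&dev->lock);

	free(old);			/* free outside the lock, like kfree(old) */
	return 0;
}

static void show_override(struct fake_dev *dev)
{
	pthread_mutex_lock(&dev->lock);
	printf("%s\n", dev->driver_override ? dev->driver_override : "(null)");
	pthread_mutex_unlock(&dev->lock);
}

int main(void)
{
	struct fake_dev dev = { PTHREAD_MUTEX_INITIALIZER, NULL };

	set_override(&dev, "my-driver");
	show_override(&dev);
	set_override(&dev, "");		/* empty string clears the override */
	show_override(&dev);
	return 0;
}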
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 3b6ac80b2127..49199bd2ab93 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2628,8 +2628,10 @@ static unsigned int binder_poll(struct file *filp,
binder_lock(__func__);
thread = binder_get_thread(proc);
- if (!thread)
+ if (!thread) {
+ binder_unlock(__func__);
return POLLERR;
+ }
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo) && thread->return_error == BR_OK;
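The binder_poll fix above adds the missing unlock on the early-error path. A compact sketch of the "drop the lock on every exit taken after acquiring it" rule (names and the -1 return are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

static int poll_sketch(void *thread)
{
	int events = 0;

	pthread_mutex_lock(&g_lock);
	if (!thread) {
		pthread_mutex_unlock(&g_lock);	/* the unlock the bug was missing */
		return -1;			/* stand-in for POLLERR */
	}
	/* ... inspect per-thread work queues here ... */
	pthread_mutex_unlock(&g_lock);
	return events;
}

int main(void)
{
	printf("%d\n", poll_sketch(NULL));
	return 0;
}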
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9b46ef4c851e..4d4b5f607b81 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -539,7 +539,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
- { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
+ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
+ .driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
.driver_data = board_ahci_yes_fbs },
/* Promise */
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index aaa761b9081c..cd2eab6aa92e 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(dev, "no irq\n");
- return -EINVAL;
+ if (irq != -EPROBE_DEFER)
+ dev_err(dev, "no irq\n");
+ return irq;
}
hpriv->irq = irq;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index aee39524375c..4fe3ec122bf0 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4403,6 +4403,28 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* Crucial BX100 SSD 500GB has broken LPM support */
+ { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
+
+ /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
+ { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+ /* 512GB MX100 with newer firmware has only LPM issues */
+ { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+
+ /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
+ { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+ { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+
+ /* Sandisk devices which are known to not handle LPM well */
+ { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
+
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4414,7 +4436,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -5265,8 +5289,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
* We guarantee to LLDs that they will have at least one
* non-zero sg if the command is a data command.
*/
- if (WARN_ON_ONCE(ata_is_data(prot) &&
- (!qc->sg || !qc->n_elem || !qc->nbytes)))
+ if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
goto sys_err;
if (ata_is_dma(prot) || (ata_is_pio(prot) &&
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e3e10e8f6f6a..9babbc845750 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3226,6 +3226,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
goto invalid_fld;
}
+ /* We may not issue NCQ commands to devices not supporting NCQ */
+ if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) {
+ fp = 1;
+ goto invalid_fld;
+ }
+
/* sanity check for pio multi commands */
if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
fp = 1;
@@ -4177,7 +4183,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
/* relay SCSI command to ATAPI device */
int len = COMMAND_SIZE(scsi_op);
- if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
+ if (unlikely(len > scmd->cmd_len ||
+ len > dev->cdb_len ||
+ scmd->cmd_len > ATAPI_CDB_LEN))
goto bad_cdb_len;
xlat_func = atapi_xlat;
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index d3dc95484161..81bfeec67b77 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -23,6 +23,7 @@
#include <linux/bitops.h>
#include <linux/wait.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <asm/byteorder.h>
#include <asm/string.h>
#include <asm/io.h>
@@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
+ pool = array_index_nospec(pool,
+ ZATM_LAST_POOL + 1);
spin_lock_irqsave(&zatm_dev->lock, flags);
info = zatm_dev->pool_info[pool];
if (cmd == ZATM_GETPOOLZ) {
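The zatm change above clamps the user-supplied pool index with array_index_nospec() after the range check, so the index cannot be used speculatively out of bounds. A userspace sketch of the check-then-clamp idea; array_index_nospec() is a kernel helper, so a plain mask stands in for it here (and only works because the illustrative table size is a power of two):

#include <stdio.h>

#define LAST_POOL 7

static int pool_info[LAST_POOL + 1];

/* Reject out-of-range indices, then force the index into range so even a
 * mispredicted path cannot index past the array. */
static int get_pool(long pool)
{
	if (pool < 0 || pool > LAST_POOL)
		return -1;
	pool &= LAST_POOL;		/* stand-in for array_index_nospec() */
	return pool_info[pool];
}

int main(void)
{
	printf("%d %d\n", get_pool(3), get_pool(99));	/* 0 -1 */
	return 0;
}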
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ae63bb0875ea..a7b0fc7cb468 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1736,7 +1736,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
- if (map->max_raw_write && map->max_raw_write > val_len)
+ if (map->max_raw_write && map->max_raw_write < val_len)
return -E2BIG;
map->lock(map->lock_arg);
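The regmap fix above inverts the comparison: a raw write is too big when val_len exceeds max_raw_write, not the other way round. A tiny sketch of the corrected guard (the field names and -E2BIG are mirrored for illustration only):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Return -E2BIG only when the request is larger than the device limit. */
static int check_raw_write(size_t max_raw_write, size_t val_len)
{
	if (max_raw_write && max_raw_write < val_len)
		return -E2BIG;
	return 0;
}

int main(void)
{
	printf("%d %d\n", check_raw_write(16, 8), check_raw_write(16, 32));
	return 0;
}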
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 402254d26247..ff1c4d7aa025 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -263,7 +263,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
struct iov_iter i;
ssize_t bw;
- iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
+ iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &i, ppos);
@@ -612,6 +612,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
*/
static int loop_flush(struct loop_device *lo)
{
+ /* loop not yet configured, no running thread, nothing to flush */
+ if (lo->lo_state != Lo_bound)
+ return 0;
return loop_switch(lo, NULL);
}
@@ -1107,11 +1110,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (info->lo_encrypt_type) {
unsigned int type = info->lo_encrypt_type;
- if (type >= MAX_LO_CRYPT)
- return -EINVAL;
+ if (type >= MAX_LO_CRYPT) {
+ err = -EINVAL;
+ goto exit;
+ }
xfer = xfer_funcs[type];
- if (xfer == NULL)
- return -EINVAL;
+ if (xfer == NULL) {
+ err = -EINVAL;
+ goto exit;
+ }
} else
xfer = NULL;
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 08c2c93887c1..3aac78a5091b 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -85,7 +85,8 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
- kfree_skb(skb);
+ if (!ret)
+ kfree_skb(skb);
return ret;
}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 9497c469efd2..2230f9368c21 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -113,16 +113,21 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
{
struct sk_buff *skb = hu->tx_skb;
- if (!skb)
- skb = hu->proto->dequeue(hu);
- else
+ if (!skb) {
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ skb = hu->proto->dequeue(hu);
+ } else {
hu->tx_skb = NULL;
+ }
return skb;
}
int hci_uart_tx_wakeup(struct hci_uart *hu)
{
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ return 0;
+
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
return 0;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 6c867fbc56a7..74b2f4a14643 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -936,6 +936,9 @@ static int qca_setup(struct hci_uart *hu)
if (!ret) {
set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
+ } else if (ret == -ENOENT) {
+ /* No patch/nvm-config found, run with original fw/config */
+ ret = 0;
}
/* Setup bdaddr */
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 72fe0a5a8bf3..017c37b9c7c1 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -37,8 +37,6 @@
#define ARB_ERR_CAP_CLEAR (1 << 0)
#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
-#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2)
-#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c
#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
@@ -47,7 +45,6 @@ enum {
ARB_ERR_CAP_CLR,
ARB_ERR_CAP_HI_ADDR,
ARB_ERR_CAP_ADDR,
- ARB_ERR_CAP_DATA,
ARB_ERR_CAP_STATUS,
ARB_ERR_CAP_MASTER,
};
@@ -57,7 +54,6 @@ static const int gisb_offsets_bcm7038[] = {
[ARB_ERR_CAP_CLR] = 0x0c4,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0c8,
- [ARB_ERR_CAP_DATA] = 0x0cc,
[ARB_ERR_CAP_STATUS] = 0x0d0,
[ARB_ERR_CAP_MASTER] = -1,
};
@@ -67,7 +63,6 @@ static const int gisb_offsets_bcm7400[] = {
[ARB_ERR_CAP_CLR] = 0x0c8,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0cc,
- [ARB_ERR_CAP_DATA] = 0x0d0,
[ARB_ERR_CAP_STATUS] = 0x0d4,
[ARB_ERR_CAP_MASTER] = 0x0d8,
};
@@ -77,7 +72,6 @@ static const int gisb_offsets_bcm7435[] = {
[ARB_ERR_CAP_CLR] = 0x168,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x16c,
- [ARB_ERR_CAP_DATA] = 0x170,
[ARB_ERR_CAP_STATUS] = 0x174,
[ARB_ERR_CAP_MASTER] = 0x178,
};
@@ -87,7 +81,6 @@ static const int gisb_offsets_bcm7445[] = {
[ARB_ERR_CAP_CLR] = 0x7e4,
[ARB_ERR_CAP_HI_ADDR] = 0x7e8,
[ARB_ERR_CAP_ADDR] = 0x7ec,
- [ARB_ERR_CAP_DATA] = 0x7f0,
[ARB_ERR_CAP_STATUS] = 0x7f4,
[ARB_ERR_CAP_MASTER] = 0x7f8,
};
@@ -109,9 +102,13 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
{
int offset = gdev->gisb_offsets[reg];
- /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
- if (offset == -1)
- return 1;
+ if (offset < 0) {
+ /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+ if (reg == ARB_ERR_CAP_MASTER)
+ return 1;
+ else
+ return 0;
+ }
if (gdev->big_endian)
return ioread32be(gdev->base + offset);
@@ -119,6 +116,16 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
return ioread32(gdev->base + offset);
}
+static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
+{
+ u64 value;
+
+ value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
+ value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
+
+ return value;
+}
+
static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
{
int offset = gdev->gisb_offsets[reg];
@@ -127,9 +134,9 @@ static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
return;
if (gdev->big_endian)
- iowrite32be(val, gdev->base + reg);
+ iowrite32be(val, gdev->base + offset);
else
- iowrite32(val, gdev->base + reg);
+ iowrite32(val, gdev->base + offset);
}
static ssize_t gisb_arb_get_timeout(struct device *dev,
@@ -185,7 +192,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
const char *reason)
{
u32 cap_status;
- unsigned long arb_addr;
+ u64 arb_addr;
u32 master;
const char *m_name;
char m_fmt[11];
@@ -197,10 +204,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
return 1;
/* Read the address and master */
- arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
-#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
- arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
-#endif
+ arb_addr = gisb_read_address(gdev);
master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
m_name = brcmstb_gisb_master_to_str(gdev, master);
@@ -209,7 +213,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
m_name = m_fmt;
}
- pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n",
+ pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
__func__, reason, arb_addr,
cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
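The brcmstb_gisb changes above make gisb_write() index the MMIO window by the translated offset rather than the enum value, and fold the high address word in unconditionally via gisb_read_address(). A userspace sketch of register access through an offset table, with an array standing in for the ioremapped window (offsets and register names are invented):

#include <stdint.h>
#include <stdio.h>

enum { CAP_ADDR, CAP_HI_ADDR, CAP_STATUS, NUM_REGS };

/* Per-SoC offset table; -1 means the register does not exist. */
static const int offsets[NUM_REGS] = { 0x0c8, -1, 0x0d0 };
static uint32_t fake_mmio[0x100];		/* stands in for the MMIO window */

static uint32_t reg_read(int reg)
{
	int offset = offsets[reg];

	if (offset < 0)
		return 0;			/* missing register reads as 0 */
	return fake_mmio[offset / 4];
}

static void reg_write(int reg, uint32_t val)
{
	int offset = offsets[reg];

	if (offset < 0)
		return;
	fake_mmio[offset / 4] = val;		/* index by offset, not by 'reg' */
}

int main(void)
{
	reg_write(CAP_ADDR, 0xdeadbeef);
	printf("0x%x\n", reg_read(CAP_ADDR));
	return 0;
}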
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 5d475b3a0b2e..128ebd439221 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2368,7 +2368,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);
- if ((unsigned int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 0f7d28a98b9a..a7cc5b7be598 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -871,6 +871,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
}
}
wmb();
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 510fc104bcdc..f11c1c7e84c6 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -409,6 +409,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
msg = ipmi_alloc_smi_msg();
if (!msg) {
ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -431,6 +432,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
msg = ipmi_alloc_smi_msg();
if (!msg) {
ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 909311016108..055d2ce378a7 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -515,7 +515,7 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_add(2, &panic_done_count);
+ atomic_add(1, &panic_done_count);
rv = ipmi_request_supply_msgs(watchdog_user,
(struct ipmi_addr *) &addr,
0,
@@ -525,7 +525,7 @@ static void panic_halt_ipmi_heartbeat(void)
&panic_halt_heartbeat_recv_msg,
1);
if (rv)
- atomic_sub(2, &panic_done_count);
+ atomic_sub(1, &panic_done_count);
}
static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -549,12 +549,12 @@ static void panic_halt_ipmi_set_timeout(void)
/* Wait for the messages to be free. */
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
- atomic_add(2, &panic_done_count);
+ atomic_add(1, &panic_done_count);
rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
&panic_halt_recv_msg,
&send_heartbeat_now);
if (rv) {
- atomic_sub(2, &panic_done_count);
+ atomic_sub(1, &panic_done_count);
printk(KERN_WARNING PFX
"Unable to extend the watchdog timeout.");
} else {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 25ee319dc8e3..8f962a751c17 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -259,6 +259,7 @@
#include <linux/kmemcheck.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
+#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
@@ -435,8 +436,9 @@ struct crng_state primary_crng = {
* its value (from 0->1->2).
*/
static int crng_init = 0;
-#define crng_ready() (likely(crng_init > 0))
+#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
+static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
__u8 out[CHACHA20_BLOCK_SIZE]);
@@ -444,6 +446,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
__u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
+static struct ratelimit_state unseeded_warning =
+ RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+static struct ratelimit_state urandom_warning =
+ RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+
+static int ratelimit_disable __read_mostly;
+
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
/**********************************************************************
*
* OS independent entropy store. Here are the functions which handle
@@ -742,7 +754,7 @@ retry:
static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
- const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+ const int nbits_max = r->poolinfo->poolwords * 32;
if (nbits < 0)
return -EINVAL;
@@ -801,7 +813,7 @@ static int crng_fast_load(const char *cp, size_t len)
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
- if (crng_ready()) {
+ if (crng_init != 0) {
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0;
}
@@ -819,6 +831,39 @@ static int crng_fast_load(const char *cp, size_t len)
return 1;
}
+#ifdef CONFIG_NUMA
+static void do_numa_crng_init(struct work_struct *work)
+{
+ int i;
+ struct crng_state *crng;
+ struct crng_state **pool;
+
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+ for_each_online_node(i) {
+ crng = kmalloc_node(sizeof(struct crng_state),
+ GFP_KERNEL | __GFP_NOFAIL, i);
+ spin_lock_init(&crng->lock);
+ crng_initialize(crng);
+ pool[i] = crng;
+ }
+ mb();
+ if (cmpxchg(&crng_node_pool, NULL, pool)) {
+ for_each_node(i)
+ kfree(pool[i]);
+ kfree(pool);
+ }
+}
+
+static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+
+static void numa_crng_init(void)
+{
+ schedule_work(&numa_crng_init_work);
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
unsigned long flags;
@@ -837,7 +882,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
_crng_backtrack_protect(&primary_crng, buf.block,
CHACHA20_KEY_SIZE);
}
- spin_lock_irqsave(&primary_crng.lock, flags);
+ spin_lock_irqsave(&crng->lock, flags);
for (i = 0; i < 8; i++) {
unsigned long rv;
if (!arch_get_random_seed_long(&rv) &&
@@ -848,12 +893,25 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
memzero_explicit(&buf, sizeof(buf));
crng->init_time = jiffies;
if (crng == &primary_crng && crng_init < 2) {
+ numa_crng_init();
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
pr_notice("random: crng init done\n");
+ if (unseeded_warning.missed) {
+ pr_notice("random: %d get_random_xx warning(s) missed "
+ "due to ratelimiting\n",
+ unseeded_warning.missed);
+ unseeded_warning.missed = 0;
+ }
+ if (urandom_warning.missed) {
+ pr_notice("random: %d urandom warning(s) missed "
+ "due to ratelimiting\n",
+ urandom_warning.missed);
+ urandom_warning.missed = 0;
+ }
}
- spin_unlock_irqrestore(&primary_crng.lock, flags);
+ spin_unlock_irqrestore(&crng->lock, flags);
}
static inline void maybe_reseed_primary_crng(void)
@@ -873,8 +931,9 @@ static void _extract_crng(struct crng_state *crng,
{
unsigned long v, flags;
- if (crng_init > 1 &&
- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
+ if (crng_ready() &&
+ (time_after(crng_global_init_time, crng->init_time) ||
+ time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
spin_lock_irqsave(&crng->lock, flags);
if (arch_get_random_long(&v))
@@ -1113,12 +1172,16 @@ static void add_interrupt_bench(cycles_t start)
static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
__u32 *ptr = (__u32 *) regs;
+ unsigned int idx;
if (regs == NULL)
return 0;
- if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
- f->reg_idx = 0;
- return *(ptr + f->reg_idx++);
+ idx = READ_ONCE(f->reg_idx);
+ if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+ idx = 0;
+ ptr += idx++;
+ WRITE_ONCE(f->reg_idx, idx);
+ return *ptr;
}
void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
@@ -1146,7 +1209,7 @@ void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
- if (!crng_ready()) {
+ if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
crng_fast_load((char *) fast_pool->pool,
sizeof(fast_pool->pool))) {
@@ -1652,28 +1715,14 @@ static void init_std_data(struct entropy_store *r)
*/
static int rand_initialize(void)
{
-#ifdef CONFIG_NUMA
- int i;
- struct crng_state *crng;
- struct crng_state **pool;
-#endif
-
init_std_data(&input_pool);
init_std_data(&blocking_pool);
crng_initialize(&primary_crng);
-
-#ifdef CONFIG_NUMA
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
- for_each_online_node(i) {
- crng = kmalloc_node(sizeof(struct crng_state),
- GFP_KERNEL | __GFP_NOFAIL, i);
- spin_lock_init(&crng->lock);
- crng_initialize(crng);
- pool[i] = crng;
+ crng_global_init_time = jiffies;
+ if (ratelimit_disable) {
+ urandom_warning.interval = 0;
+ unseeded_warning.interval = 0;
}
- mb();
- crng_node_pool = pool;
-#endif
return 0;
}
early_initcall(rand_initialize);
@@ -1741,9 +1790,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (!crng_ready() && maxwarn > 0) {
maxwarn--;
- printk(KERN_NOTICE "random: %s: uninitialized urandom read "
- "(%zd bytes read)\n",
- current->comm, nbytes);
+ if (__ratelimit(&urandom_warning))
+ printk(KERN_NOTICE "random: %s: uninitialized "
+ "urandom read (%zd bytes read)\n",
+ current->comm, nbytes);
spin_lock_irqsave(&primary_crng.lock, flags);
crng_init_cnt = 0;
spin_unlock_irqrestore(&primary_crng.lock, flags);
@@ -1847,6 +1897,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
input_pool.entropy_count = 0;
blocking_pool.entropy_count = 0;
return 0;
+ case RNDRESEEDCRNG:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (crng_init < 2)
+ return -ENODATA;
+ crng_reseed(&primary_crng, NULL);
+ crng_global_init_time = jiffies - 1;
+ return 0;
default:
return -EINVAL;
}
@@ -2144,7 +2202,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
{
struct entropy_store *poolp = &input_pool;
- if (!crng_ready()) {
+ if (unlikely(crng_init == 0)) {
crng_fast_load(buffer, count);
return;
}
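Among the random.c changes above, the "uninitialized urandom read" message is now throttled through a ratelimit_state so a noisy boot cannot flood the log, with a count of suppressed warnings reported once the CRNG is ready. A userspace sketch of that interval/burst idea; the kernel uses RATELIMIT_STATE_INIT and __ratelimit(), and the numbers below are purely illustrative:

#include <stdio.h>
#include <time.h>

struct ratelimit {
	time_t begin;
	int interval;	/* seconds */
	int burst;	/* messages allowed per interval */
	int printed;
	int missed;
};

/* Allow 'burst' messages per 'interval' seconds; count what was dropped. */
static int ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (!rs->interval)
		return 1;			/* rate limiting disabled */
	if (now - rs->begin >= rs->interval) {
		rs->begin = now;
		rs->printed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return 1;
	}
	rs->missed++;
	return 0;
}

int main(void)
{
	struct ratelimit rs = { 0, 1, 3, 0, 0 };
	int i;

	for (i = 0; i < 10; i++)
		if (ratelimit_ok(&rs))
			printf("warning %d\n", i);
	printf("missed %d\n", rs.missed);
	return 0;
}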
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 6f060c76217b..7205e6da16cd 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -458,7 +458,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
size_t count)
{
int size = 0;
- int expected;
+ u32 expected;
if (!chip)
return -EBUSY;
@@ -475,7 +475,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
}
expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if (expected > count) {
+ if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index 912ad30be585..65b824954bdc 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -136,6 +136,12 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
return -EFAULT;
}
+ if (in_size < 6 ||
+ in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EINVAL;
+ }
+
/* atomic tpm command send and result receive. We only hold the ops
* lock during this period so that the tpm can be unregistered even if
* the char dev is held open.
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index d0ac2d56520f..830d7e30e508 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -1078,6 +1078,11 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
break;
recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+ if (recd > num_bytes) {
+ total = -EFAULT;
+ break;
+ }
+
memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);
dest += recd;
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 17896d654033..a5780ebe15ef 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -668,6 +668,11 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
if (!rc) {
data_len = be16_to_cpup(
(__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+ if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
+ rc = -EFAULT;
+ goto out;
+ }
+
data = &buf.data[TPM_HEADER_SIZE + 6];
memcpy(payload->key, data, data_len - 1);
@@ -675,6 +680,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
payload->migratable = data[data_len - 1];
}
+out:
tpm_buf_destroy(&buf);
return rc;
}
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 62ee44e57ddc..da69ddea56cf 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -437,7 +437,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0;
- int expected, status;
+ int status;
+ u32 expected;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -452,7 +453,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if ((size_t) expected > count) {
+ if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index c6428771841f..caa86b19c76d 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
- int expected, status, burst_count, retries, size = 0;
+ int status;
+ int burst_count;
+ int retries;
+ int size = 0;
+ u32 expected;
if (count < TPM_HEADER_SIZE) {
i2c_nuvoton_ready(chip); /* return to idle */
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* to machine native
*/
expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
+ if (expected > count || expected < size) {
dev_err(dev, "%s() expected > count\n", __func__);
size = -EIO;
continue;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 247330efd310..b17076312e64 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -123,7 +123,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
}
static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value)
+ const u8 *value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 4d24ec3d7cd6..f9aa47ec7af7 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -208,7 +208,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int size = 0;
- int expected, status;
+ int status;
+ u32 expected;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -223,7 +224,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
+ if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
@@ -256,7 +257,7 @@ out:
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc, status, burstcnt;
@@ -345,7 +346,7 @@ static void disable_interrupts(struct tpm_chip *chip)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc;
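Several of the TPM receive paths patched above now validate the length field read out of the response header against both the caller's buffer size and the minimum header size before copying anything. A userspace sketch of that untrusted-length check (the header layout, the value 10 for TPM_HEADER_SIZE and the helper names are assumptions for illustration):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TPM_HEADER_SIZE 10

static uint32_t be32_at(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Copy a device response only if its self-declared length is sane. */
static int recv_sketch(const uint8_t *frame, uint8_t *buf, size_t count)
{
	uint32_t expected = be32_at(frame + 2);	/* length lives at offset 2 */

	if (expected > count || expected < TPM_HEADER_SIZE)
		return -EIO;			/* reject short or oversized frames */
	memcpy(buf, frame, expected);
	return (int)expected;
}

int main(void)
{
	uint8_t frame[32] = { 0x80, 0x01, 0, 0, 0, 12 };	/* claims 12 bytes */
	uint8_t out[32];

	printf("%d\n", recv_sketch(frame, out, sizeof(out)));	/* 12 */
	frame[5] = 200;						/* claims 200 bytes */
	printf("%d\n", recv_sketch(frame, out, sizeof(out)));	/* -EIO */
	return 0;
}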
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 9191aabbf9c2..e1c2193f2ed3 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -98,7 +98,7 @@ struct tpm_tis_phy_ops {
int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value);
+ const u8 *value);
int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
@@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
}
static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
return data->phy_ops->write_bytes(data, addr, len, value);
}
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 3b97b14c3417..01eccb193b5a 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -47,9 +47,7 @@
struct tpm_tis_spi_phy {
struct tpm_tis_data priv;
struct spi_device *spi_device;
-
- u8 tx_buf[4];
- u8 rx_buf[4];
+ u8 *iobuf;
};
static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -58,7 +56,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
}
static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *buffer, u8 direction)
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
@@ -72,14 +70,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
while (len) {
transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
- phy->tx_buf[0] = direction | (transfer_len - 1);
- phy->tx_buf[1] = 0xd4;
- phy->tx_buf[2] = addr >> 8;
- phy->tx_buf[3] = addr;
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
memset(&spi_xfer, 0, sizeof(spi_xfer));
- spi_xfer.tx_buf = phy->tx_buf;
- spi_xfer.rx_buf = phy->rx_buf;
+ spi_xfer.tx_buf = phy->iobuf;
+ spi_xfer.rx_buf = phy->iobuf;
spi_xfer.len = 4;
spi_xfer.cs_change = 1;
@@ -89,9 +87,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
- if ((phy->rx_buf[3] & 0x01) == 0) {
+ if ((phy->iobuf[3] & 0x01) == 0) {
// handle SPI wait states
- phy->tx_buf[0] = 0;
+ phy->iobuf[0] = 0;
for (i = 0; i < TPM_RETRY; i++) {
spi_xfer.len = 1;
@@ -100,7 +98,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
ret = spi_sync_locked(phy->spi_device, &m);
if (ret < 0)
goto exit;
- if (phy->rx_buf[0] & 0x01)
+ if (phy->iobuf[0] & 0x01)
break;
}
@@ -114,12 +112,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_xfer.len = transfer_len;
spi_xfer.delay_usecs = 5;
- if (direction) {
+ if (in) {
spi_xfer.tx_buf = NULL;
- spi_xfer.rx_buf = buffer;
- } else {
- spi_xfer.tx_buf = buffer;
+ } else if (out) {
spi_xfer.rx_buf = NULL;
+ memcpy(phy->iobuf, out, transfer_len);
+ out += transfer_len;
}
spi_message_init(&m);
@@ -128,8 +126,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
+ if (in) {
+ memcpy(in, phy->iobuf, transfer_len);
+ in += transfer_len;
+ }
+
len -= transfer_len;
- buffer += transfer_len;
}
exit:
@@ -140,13 +142,13 @@ exit:
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result)
{
- return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+ return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}
static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
- return tpm_tis_spi_transfer(data, addr, len, value, 0);
+ return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
@@ -195,6 +197,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
phy->spi_device = dev;
+ phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
NULL);
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8f890c1aca57..8c0017d48571 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1405,7 +1405,6 @@ static int add_port(struct ports_device *portdev, u32 id)
{
char debugfs_name[16];
struct port *port;
- struct port_buffer *buf;
dev_t devt;
unsigned int nr_added_bufs;
int err;
@@ -1516,8 +1515,6 @@ static int add_port(struct ports_device *portdev, u32 id)
return 0;
free_inbufs:
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
free_device:
device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
@@ -1542,34 +1539,14 @@ static void remove_port(struct kref *kref)
static void remove_port_data(struct port *port)
{
- struct port_buffer *buf;
-
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
spin_unlock_irq(&port->inbuf_lock);
- /* Remove buffers we queued up for the Host to send us data in. */
- do {
- spin_lock_irq(&port->inbuf_lock);
- buf = virtqueue_detach_unused_buf(port->in_vq);
- spin_unlock_irq(&port->inbuf_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
-
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
-
- /* Free pending buffers from the out-queue. */
- do {
- spin_lock_irq(&port->outvq_lock);
- buf = virtqueue_detach_unused_buf(port->out_vq);
- spin_unlock_irq(&port->outvq_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
}
/*
@@ -1794,13 +1771,24 @@ static void control_work_handler(struct work_struct *work)
spin_unlock(&portdev->c_ivq_lock);
}
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(vq, &len)))
+ free_buf(buf, can_sleep);
+}
+
static void out_intr(struct virtqueue *vq)
{
struct port *port;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
wake_up_interruptible(&port->waitqueue);
}
@@ -1811,8 +1799,10 @@ static void in_intr(struct virtqueue *vq)
unsigned long flags;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
spin_lock_irqsave(&port->inbuf_lock, flags);
port->inbuf = get_inbuf(port);
@@ -1987,6 +1977,15 @@ static const struct file_operations portdev_fops = {
static void remove_vqs(struct ports_device *portdev)
{
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq(portdev->vdev, vq) {
+ struct port_buffer *buf;
+
+ flush_bufs(vq, true);
+ while ((buf = virtqueue_detach_unused_buf(vq)))
+ free_buf(buf, true);
+ }
portdev->vdev->config->del_vqs(portdev->vdev);
kfree(portdev->in_vqs);
kfree(portdev->out_vqs);
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 4e1cd5aa69d8..07c8f701e51c 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -260,13 +260,13 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
gck->lock = lock;
gck->range = *range;
+ clk_generated_startup(gck);
hw = &gck->hw;
ret = clk_hw_register(NULL, &gck->hw);
if (ret) {
kfree(gck);
hw = ERR_PTR(ret);
- } else
- clk_generated_startup(gck);
+ }
return hw;
}
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 2acaa77ad482..73aab6e984cd 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -401,17 +401,17 @@ struct bcm2835_pll_ana_bits {
static const struct bcm2835_pll_ana_bits bcm2835_ana_default = {
.mask0 = 0,
.set0 = 0,
- .mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK),
+ .mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK,
.set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT),
- .mask3 = (u32)~A2W_PLL_KA_MASK,
+ .mask3 = A2W_PLL_KA_MASK,
.set3 = (2 << A2W_PLL_KA_SHIFT),
.fb_prediv_mask = BIT(14),
};
static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = {
- .mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK),
+ .mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK,
.set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT),
- .mask1 = (u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK),
+ .mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK,
.set1 = (6 << A2W_PLLH_KP_SHIFT),
.mask3 = 0,
.set3 = 0,
@@ -545,9 +545,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
const struct bcm2835_pll_data *data = pll->data;
spin_lock(&cprman->regs_lock);
- cprman_write(cprman, data->cm_ctrl_reg,
- cprman_read(cprman, data->cm_ctrl_reg) |
- CM_PLL_ANARST);
+ cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
cprman_write(cprman, data->a2w_ctrl_reg,
cprman_read(cprman, data->a2w_ctrl_reg) |
A2W_PLL_CTRL_PWRDN);
@@ -566,8 +564,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
~A2W_PLL_CTRL_PWRDN);
/* Take the PLL out of reset. */
+ spin_lock(&cprman->regs_lock);
cprman_write(cprman, data->cm_ctrl_reg,
cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
+ spin_unlock(&cprman->regs_lock);
/* Wait for the PLL to lock. */
timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
@@ -581,6 +581,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
cpu_relax();
}
+ cprman_write(cprman, data->a2w_ctrl_reg,
+ cprman_read(cprman, data->a2w_ctrl_reg) |
+ A2W_PLL_CTRL_PRST_DISABLE);
+
return 0;
}
@@ -644,9 +648,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
}
/* Unmask the reference clock from the oscillator. */
+ spin_lock(&cprman->regs_lock);
cprman_write(cprman, A2W_XOSC_CTRL,
cprman_read(cprman, A2W_XOSC_CTRL) |
data->reference_enable_mask);
+ spin_unlock(&cprman->regs_lock);
if (do_ana_setup_first)
bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
diff --git a/drivers/clk/bcm/clk-ns2.c b/drivers/clk/bcm/clk-ns2.c
index a564e9248814..adc14145861a 100644
--- a/drivers/clk/bcm/clk-ns2.c
+++ b/drivers/clk/bcm/clk-ns2.c
@@ -103,7 +103,7 @@ CLK_OF_DECLARE(ns2_genpll_src_clk, "brcm,ns2-genpll-scr",
static const struct iproc_pll_ctrl genpll_sw = {
.flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
- .aon = AON_VAL(0x0, 2, 9, 8),
+ .aon = AON_VAL(0x0, 1, 11, 10),
.reset = RESET_VAL(0x4, 2, 1),
.dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3),
.ndiv_int = REG_VAL(0x8, 4, 10),
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 5e918e7afaba..95a6e9834392 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -40,6 +40,10 @@
#define MMCM_REG_FILTER1 0x4e
#define MMCM_REG_FILTER2 0x4f
+#define MMCM_CLKOUT_NOCOUNT BIT(6)
+
+#define MMCM_CLK_DIV_NOCOUNT BIT(12)
+
struct axi_clkgen {
void __iomem *base;
struct clk_hw clk_hw;
@@ -315,12 +319,27 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
unsigned int reg;
unsigned long long tmp;
- axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, &reg);
- dout = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_2, &reg);
+ if (reg & MMCM_CLKOUT_NOCOUNT) {
+ dout = 1;
+ } else {
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, &reg);
+ dout = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+ }
+
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &reg);
- d = (reg & 0x3f) + ((reg >> 6) & 0x3f);
- axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, &reg);
- m = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+ if (reg & MMCM_CLK_DIV_NOCOUNT)
+ d = 1;
+ else
+ d = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB2, &reg);
+ if (reg & MMCM_CLKOUT_NOCOUNT) {
+ m = 1;
+ } else {
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, &reg);
+ m = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+ }
if (d == 0 || dout == 0)
return 0;
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 674785d968a3..f02900922bbe 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -106,7 +106,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
rc = clk_set_rate(clk, rate);
if (rc < 0)
- pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n",
+ pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
__clk_get_name(clk), rate, rc,
clk_get_rate(clk));
clk_put(clk);
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 96d37175d0ad..8ad458b5ad6e 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -71,15 +71,15 @@ static const struct clk_ops scpi_clk_ops = {
};
/* find closest match to given frequency in OPP table */
-static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
+static long __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
{
int idx;
- u32 fmin = 0, fmax = ~0, ftmp;
+ unsigned long fmin = 0, fmax = ~0, ftmp;
const struct scpi_opp *opp = clk->info->opps;
for (idx = 0; idx < clk->info->count; idx++, opp++) {
ftmp = opp->freq;
- if (ftmp >= (u32)rate) {
+ if (ftmp >= rate) {
if (ftmp <= fmax)
fmax = ftmp;
break;
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index b051db43fae1..136a86094c53 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -72,7 +72,7 @@ static const char * const si5351_input_names[] = {
"xtal", "clkin"
};
static const char * const si5351_pll_names[] = {
- "plla", "pllb", "vxco"
+ "si5351_plla", "si5351_pllb", "si5351_vxco"
};
static const char * const si5351_msynth_names[] = {
"ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7"
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0fb39fe217d1..0cdb8550729d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2438,6 +2438,21 @@ static int __clk_core_init(struct clk_core *core)
core->rate = core->req_rate = rate;
/*
+ * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
+ * don't get accidentally disabled when walking the orphan tree and
+ * reparenting clocks
+ */
+ if (core->flags & CLK_IS_CRITICAL) {
+ unsigned long flags;
+
+ clk_core_prepare(core);
+
+ flags = clk_enable_lock();
+ clk_core_enable(core);
+ clk_enable_unlock(flags);
+ }
+
+ /*
* walk the list of orphan clocks and reparent any that newly finds a
* parent.
*/
@@ -2445,10 +2460,13 @@ static int __clk_core_init(struct clk_core *core)
struct clk_core *parent = __clk_init_parent(orphan);
/*
- * we could call __clk_set_parent, but that would result in a
- * redundant call to the .set_rate op, if it exists
+ * We need to use __clk_set_parent_before() and _after() to
+ * properly migrate any prepare/enable count of the orphan
+ * clock. This is important for CLK_IS_CRITICAL clocks, which
+ * are enabled during init but might not have a parent yet.
*/
if (parent) {
+ /* update the clk tree topology */
__clk_set_parent_before(orphan, parent);
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
@@ -2467,16 +2485,6 @@ static int __clk_core_init(struct clk_core *core)
if (core->ops->init)
core->ops->init(core->hw);
- if (core->flags & CLK_IS_CRITICAL) {
- unsigned long flags;
-
- clk_core_prepare(core);
-
- flags = clk_enable_lock();
- clk_core_enable(core);
- clk_enable_unlock(flags);
- }
-
kref_init(&core->ref);
out:
clk_prepare_unlock();
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 2f29ee1a4d00..5588f75a8414 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -7,9 +7,9 @@ config COMMON_CLK_MESON8B
bool
depends on COMMON_CLK_AMLOGIC
help
- Support for the clock controller on AmLogic S805 devices, aka
- meson8b. Say Y if you want peripherals and CPU frequency scaling to
- work.
+ Support for the clock controller on AmLogic S802 (Meson8),
+ S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you
+ want peripherals and CPU frequency scaling to work.
config COMMON_CLK_GXBB
bool
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 9d9af446bafc..37e05d6e010a 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -572,7 +572,7 @@ static MESON_GATE(gxbb_pl301, HHI_GCLK_MPEG0, 6);
static MESON_GATE(gxbb_periphs, HHI_GCLK_MPEG0, 7);
static MESON_GATE(gxbb_spicc, HHI_GCLK_MPEG0, 8);
static MESON_GATE(gxbb_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG0, 10);
+static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG0, 10);
static MESON_GATE(gxbb_smart_card, HHI_GCLK_MPEG0, 11);
static MESON_GATE(gxbb_rng0, HHI_GCLK_MPEG0, 12);
static MESON_GATE(gxbb_uart0, HHI_GCLK_MPEG0, 13);
@@ -623,7 +623,7 @@ static MESON_GATE(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
static MESON_GATE(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11);
static MESON_GATE(gxbb_dvin, HHI_GCLK_MPEG2, 12);
static MESON_GATE(gxbb_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG2, 22);
+static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG2, 22);
static MESON_GATE(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25);
static MESON_GATE(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
static MESON_GATE(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29);
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 3f1be46cbb33..70567958b86a 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1,5 +1,6 @@
/*
- * AmLogic S805 / Meson8b Clock Controller Driver
+ * AmLogic S802 (Meson8) / S805 (Meson8b) / S812 (Meson8m2) Clock Controller
+ * Driver
*
* Copyright (c) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
@@ -661,7 +662,9 @@ iounmap:
}
static const struct of_device_id meson8b_clkc_match_table[] = {
+ { .compatible = "amlogic,meson8-clkc" },
{ .compatible = "amlogic,meson8b-clkc" },
+ { .compatible = "amlogic,meson8m2-clkc" },
{ }
};
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
index 8bccf4ecdab6..9ff4ea63932d 100644
--- a/drivers/clk/mvebu/armada-38x.c
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -46,10 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
}
static const u32 armada_38x_cpu_frequencies[] __initconst = {
- 0, 0, 0, 0,
- 1066 * 1000 * 1000, 0, 0, 0,
+ 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
+ 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
1332 * 1000 * 1000, 0, 0, 0,
- 1600 * 1000 * 1000,
+ 1600 * 1000 * 1000, 0, 0, 0,
+ 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
};
static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
@@ -75,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
};
static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
- {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {1, 2}, {0, 1},
+ {1, 2}, {0, 1}, {1, 2}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {1, 2},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
@@ -90,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {7, 15},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 5c4e193164d4..8dd71345b5d0 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -1437,6 +1437,7 @@ static const struct freq_tbl ftbl_codec_clk[] = {
static struct clk_rcg2 codec_digcodec_clk_src = {
.cmd_rcgr = 0x1c09c,
+ .mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll1_emclk_sleep_map,
.freq_tbl = ftbl_codec_clk,
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index ca97e1151797..3b171bef913a 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -2984,7 +2984,7 @@ static struct gdsc vfe1_gdsc = {
.cxcs = (unsigned int []){ 0x36ac },
.cxc_count = 1,
.pd = {
- .name = "vfe0",
+ .name = "vfe1",
},
.parent = &camss_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 00e6aba4b9c0..c55d5fe116d6 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -271,11 +271,14 @@ struct cpg_pll_config {
unsigned int extal_div;
unsigned int pll1_mult;
unsigned int pll3_mult;
+ unsigned int pll0_mult; /* For R-Car V2H and E2 only */
};
static const struct cpg_pll_config cpg_pll_configs[8] __initconst = {
- { 1, 208, 106 }, { 1, 208, 88 }, { 1, 156, 80 }, { 1, 156, 66 },
- { 2, 240, 122 }, { 2, 240, 102 }, { 2, 208, 106 }, { 2, 208, 88 },
+ { 1, 208, 106, 200 }, { 1, 208, 88, 200 },
+ { 1, 156, 80, 150 }, { 1, 156, 66, 150 },
+ { 2, 240, 122, 230 }, { 2, 240, 102, 230 },
+ { 2, 208, 106, 200 }, { 2, 208, 88, 200 },
};
/* SDHI divisors */
@@ -297,6 +300,12 @@ static const struct clk_div_table cpg_sd01_div_table[] = {
static u32 cpg_mode __initdata;
+static const char * const pll0_mult_match[] = {
+ "renesas,r8a7792-cpg-clocks",
+ "renesas,r8a7794-cpg-clocks",
+ NULL
+};
+
static struct clk * __init
rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
const struct cpg_pll_config *config,
@@ -317,9 +326,15 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ if (of_device_compatible_match(np, pll0_mult_match)) {
+ /* R-Car V2H and E2 do not have PLL0CR */
+ mult = config->pll0_mult;
+ div = 3;
+ } else {
+ u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
+ }
parent_name = "main";
- mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
} else if (!strcmp(name, "pll1")) {
parent_name = "main";
mult = config->pll1_mult / 2;
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index eea38f6ea77e..3892346c4fcc 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -46,7 +46,7 @@ struct div4_clk {
unsigned int shift;
};
-static struct div4_clk div4_clks[] = {
+static const struct div4_clk div4_clks[] = {
{ "zg", "pll0", CPG_FRQCRA, 16 },
{ "m3", "pll1", CPG_FRQCRA, 12 },
{ "b", "pll1", CPG_FRQCRA, 8 },
@@ -79,7 +79,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
{
const struct clk_div_table *table = NULL;
unsigned int shift, reg, width;
- const char *parent_name;
+ const char *parent_name = NULL;
unsigned int mult = 1;
unsigned int div = 1;
@@ -135,7 +135,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
shift = 24;
width = 5;
} else {
- struct div4_clk *c;
+ const struct div4_clk *c;
for (c = div4_clks; c->name; c++) {
if (!strcmp(name, c->name)) {
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 9fe0939c1273..6ea5401e6881 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -750,7 +750,7 @@ static struct ccu_mp out_a_clk = {
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("out-a",
clk_out_parents,
- &ccu_div_ops,
+ &ccu_mp_ops,
0),
},
};
@@ -771,7 +771,7 @@ static struct ccu_mp out_b_clk = {
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("out-b",
clk_out_parents,
- &ccu_div_ops,
+ &ccu_mp_ops,
0),
},
};
@@ -792,7 +792,7 @@ static struct ccu_mp out_c_clk = {
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("out-c",
clk_out_parents,
- &ccu_div_ops,
+ &ccu_mp_ops,
0),
},
};
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 6fb3cd24c1b6..a1d7fa48229d 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -599,6 +599,16 @@ void gpstate_timer_handler(unsigned long data)
if (!spin_trylock(&gpstates->gpstate_lock))
return;
+ /*
+ * If the timer has migrated to a different cpu then bring
+ * it back to one of the policy->cpus
+ */
+ if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
+ gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
+ add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
+ spin_unlock(&gpstates->gpstate_lock);
+ return;
+ }
gpstates->last_sampled_time += time_diff;
gpstates->elapsed_time += time_diff;
@@ -626,10 +636,8 @@ void gpstate_timer_handler(unsigned long data)
gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id);
gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id);
+ set_pstate(&freq_data);
spin_unlock(&gpstates->gpstate_lock);
-
- /* Timer may get migrated to a different cpu on cpu hot unplug */
- smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
}
/*
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 7b596fa38ad2..6bebc1f9f55a 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
policy->clk = clk_arm;
- return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
+
+ policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+
+ if (ftab)
+ return cpufreq_table_validate_and_show(policy, ftab);
+
+ return 0;
}
static int __init s3c_cpufreq_initclks(void)
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 86628e22b2a3..719c3d9f07fb 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -30,54 +30,63 @@
static DEFINE_PER_CPU(struct clk, sh_cpuclk);
+struct cpufreq_target {
+ struct cpufreq_policy *policy;
+ unsigned int freq;
+};
+
static unsigned int sh_cpufreq_get(unsigned int cpu)
{
return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
}
-/*
- * Here we notify other drivers of the proposed change and the final change.
- */
-static int sh_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static long __sh_cpufreq_target(void *arg)
{
- unsigned int cpu = policy->cpu;
+ struct cpufreq_target *target = arg;
+ struct cpufreq_policy *policy = target->policy;
+ int cpu = policy->cpu;
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
- cpumask_t cpus_allowed;
struct cpufreq_freqs freqs;
struct device *dev;
long freq;
- cpus_allowed = current->cpus_allowed;
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
-
- BUG_ON(smp_processor_id() != cpu);
+ if (smp_processor_id() != cpu)
+ return -ENODEV;
dev = get_cpu_device(cpu);
/* Convert target_freq from kHz to Hz */
- freq = clk_round_rate(cpuclk, target_freq * 1000);
+ freq = clk_round_rate(cpuclk, target->freq * 1000);
if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
return -EINVAL;
- dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
+ dev_dbg(dev, "requested frequency %u Hz\n", target->freq * 1000);
freqs.old = sh_cpufreq_get(cpu);
freqs.new = (freq + 500) / 1000;
freqs.flags = 0;
- cpufreq_freq_transition_begin(policy, &freqs);
- set_cpus_allowed_ptr(current, &cpus_allowed);
+ cpufreq_freq_transition_begin(target->policy, &freqs);
clk_set_rate(cpuclk, freq);
- cpufreq_freq_transition_end(policy, &freqs, 0);
+ cpufreq_freq_transition_end(target->policy, &freqs, 0);
dev_dbg(dev, "set frequency %lu Hz\n", freq);
-
return 0;
}
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int sh_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_target data = { .policy = policy, .freq = target_freq };
+
+ return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
+}
+
static int sh_cpufreq_verify(struct cpufreq_policy *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index a5c111b67f37..ea11a33e7fff 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -174,8 +174,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
if (!state_node)
break;
- if (!of_device_is_available(state_node))
+ if (!of_device_is_available(state_node)) {
+ of_node_put(state_node);
continue;
+ }
if (!idle_state_valid(state_node, i, cpumask)) {
pr_warn("%s idle state not valid, bailing out\n",
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index d0b16e5e4ee5..d8305ddf87d0 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -750,7 +750,10 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
if (final)
new_len = DIV_ROUND_UP(new_len, bs) * bs;
else
- new_len = new_len / bs * bs;
+ new_len = (new_len - 1) / bs * bs;
+
+ if (nbytes != new_len)
+ list_ok = false;
while (nbytes > 0 && sg_tmp) {
n++;
@@ -846,6 +849,8 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
else
xmit_len = xmit_len / bs * bs;
+ } else if (!final) {
+ xmit_len -= bs;
}
hash_later = rctx->total - xmit_len;
@@ -873,14 +878,21 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
}
if (hash_later) {
- if (req->nbytes) {
- scatterwalk_map_and_copy(rctx->buffer, req->src,
- req->nbytes - hash_later,
- hash_later, 0);
- } else {
+ int offset = 0;
+
+ if (hash_later > req->nbytes) {
memcpy(rctx->buffer, rctx->buffer + xmit_len,
- hash_later);
+ hash_later - req->nbytes);
+ offset = hash_later - req->nbytes;
}
+
+ if (req->nbytes) {
+ scatterwalk_map_and_copy(rctx->buffer + offset,
+ req->src,
+ offset + req->nbytes -
+ hash_later, hash_later, 0);
+ }
+
rctx->bufcnt = hash_later;
} else {
rctx->bufcnt = 0;
@@ -1130,7 +1142,7 @@ retry:
ctx = ahash_request_ctx(req);
err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
- if (err)
+ if (err || !ctx->total)
goto err1;
dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
@@ -1189,11 +1201,10 @@ static int omap_sham_update(struct ahash_request *req)
if (!req->nbytes)
return 0;
- if (ctx->total + req->nbytes < ctx->buflen) {
+ if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
0, req->nbytes, 0);
ctx->bufcnt += req->nbytes;
- ctx->total += req->nbytes;
return 0;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 42c060c7ae15..7c71722be395 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1116,10 +1116,10 @@ next:
return count;
}
-int talitos_sg_map(struct device *dev, struct scatterlist *src,
- unsigned int len, struct talitos_edesc *edesc,
- struct talitos_ptr *ptr,
- int sg_count, unsigned int offset, int tbl_off)
+static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off, int elen)
{
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1130,7 +1130,7 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src,
}
to_talitos_ptr_len(ptr, len, is_sec1);
- to_talitos_ptr_ext_set(ptr, 0, is_sec1);
+ to_talitos_ptr_ext_set(ptr, elen, is_sec1);
if (sg_count == 1) {
to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
@@ -1140,7 +1140,7 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src,
to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
return sg_count;
}
- sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
+ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
&edesc->link_tbl[tbl_off]);
if (sg_count == 1) {
/* Only one segment now, so no link tbl needed*/
@@ -1154,6 +1154,15 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src,
return sg_count;
}
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off)
+{
+ return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+ tbl_off, 0);
+}
+
/*
* fill in and submit ipsec_esp descriptor
*/
@@ -1171,7 +1180,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
unsigned int ivsize = crypto_aead_ivsize(aead);
int tbl_off = 0;
int sg_count, ret;
- int sg_link_tbl_len;
+ int elen = 0;
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1225,20 +1234,12 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
* extent is bytes of HMAC postpended to ciphertext,
* typically 12 for ipsec
*/
- to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
-
- sg_link_tbl_len = cryptlen;
-
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
- to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
-
- if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
- sg_link_tbl_len += authsize;
- }
+ if ((desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+ (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
+ elen = authsize;
- ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
- &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
+ ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+ sg_count, areq->assoclen, tbl_off, elen);
if (ret > 1) {
tbl_off += ret;
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 40be3747724d..473b44c008dd 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -453,9 +453,21 @@ static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
return rc;
}
+static int dax_dev_split(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct file *filp = vma->vm_file;
+ struct dax_dev *dax_dev = filp->private_data;
+ struct dax_region *dax_region = dax_dev->region;
+
+ if (!IS_ALIGNED(addr, dax_region->align))
+ return -EINVAL;
+ return 0;
+}
+
static const struct vm_operations_struct dax_dev_vm_ops = {
.fault = dax_dev_fault,
.pmd_fault = dax_dev_pmd_fault,
+ .split = dax_dev_split,
};
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 9e5674c5a07b..db70cee71caa 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -943,7 +943,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
if (df->governor == governor) {
ret = 0;
goto out;
- } else if (df->governor->immutable || governor->immutable) {
+ } else if ((df->governor && df->governor->immutable) ||
+ governor->immutable) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b7d7f2d443a1..ee7b48d5243c 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1473,10 +1473,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
rmb();
- initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
- rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
rmb();
+ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+ rmb();
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
rmb();
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 6775f2c74e25..c7568869284e 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
}
}
-static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
int i;
- for (i = 0; i < DMAMUX_NR; i++)
+ for (i = 0; i < nr_clocks; i++)
clk_disable_unprepare(fsl_edma->muxclk[i]);
}
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fsl_edma->muxbase[i]))
+ if (IS_ERR(fsl_edma->muxbase[i])) {
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxbase[i]);
+ }
sprintf(clkname, "dmamux%d", i);
fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
if (IS_ERR(fsl_edma->muxclk[i])) {
dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxclk[i]);
}
ret = clk_prepare_enable(fsl_edma->muxclk[i]);
- if (ret) {
- /* disable only clks which were enabled on error */
- for (; i >= 0; i--)
- clk_disable_unprepare(fsl_edma->muxclk[i]);
-
- dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
- return ret;
- }
+ if (ret)
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
}
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return ret;
}
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return ret;
}
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return 0;
}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d1651a50c349..b9c29720aeb1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -937,6 +937,21 @@ static int sdma_disable_channel(struct dma_chan *chan)
return 0;
}
+static int sdma_disable_channel_with_delay(struct dma_chan *chan)
+{
+ sdma_disable_channel(chan);
+
+ /*
+ * According to the NXP R&D team, a delay of one BD SDMA cost time
+ * (1 ms maximum) should be added after disabling the channel
+ * bit, to ensure the SDMA core has really stopped after SDMA
+ * clients call .device_terminate_all.
+ */
+ mdelay(1);
+
+ return 0;
+}
+
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
struct sdma_engine *sdma = sdmac->sdma;
@@ -1740,19 +1755,26 @@ static int sdma_probe(struct platform_device *pdev)
if (IS_ERR(sdma->clk_ahb))
return PTR_ERR(sdma->clk_ahb);
- clk_prepare(sdma->clk_ipg);
- clk_prepare(sdma->clk_ahb);
+ ret = clk_prepare(sdma->clk_ipg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare(sdma->clk_ahb);
+ if (ret)
+ goto err_clk;
ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
sdma);
if (ret)
- return ret;
+ goto err_irq;
sdma->irq = irq;
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
- if (!sdma->script_addrs)
- return -ENOMEM;
+ if (!sdma->script_addrs) {
+ ret = -ENOMEM;
+ goto err_irq;
+ }
/* initially no scripts available */
saddr_arr = (s32 *)sdma->script_addrs;
@@ -1828,7 +1850,7 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
- sdma->dma_device.device_terminate_all = sdma_disable_channel;
+ sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
@@ -1867,6 +1889,10 @@ err_register:
dma_async_device_unregister(&sdma->dma_device);
err_init:
kfree(sdma->script_addrs);
+err_irq:
+ clk_unprepare(sdma->clk_ahb);
+err_clk:
+ clk_unprepare(sdma->clk_ipg);
return ret;
}
@@ -1878,6 +1904,8 @@ static int sdma_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, sdma->irq, sdma);
dma_async_device_unregister(&sdma->dma_device);
kfree(sdma->script_addrs);
+ clk_unprepare(sdma->clk_ahb);
+ clk_unprepare(sdma->clk_ipg);
/* Kill the tasklet */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 43e88d85129e..8c3c588834d2 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -54,7 +54,15 @@ struct ti_am335x_xbar_map {
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
- writeb_relaxed(val, iomem + event);
+ /*
+ * The TPCC_EVT_MUX_60_63 register layout differs from the
+ * rest in that event 63 is mapped to the lowest byte and
+ * event 60 to the highest; handle it separately.
+ */
+ if (event >= 60 && event <= 63)
+ writeb_relaxed(val, iomem + (63 - event % 4));
+ else
+ writeb_relaxed(val, iomem + event);
}
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 6d221e5c72ee..22658057fe27 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -933,7 +933,8 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
if (!chan)
return;
- devm_free_irq(chan->zdev->dev, chan->irq, chan);
+ if (chan->irq)
+ devm_free_irq(chan->zdev->dev, chan->irq, chan);
tasklet_kill(&chan->tasklet);
list_del(&chan->common.device_node);
clk_disable_unprepare(chan->clk_apb);
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 58d3e2b39b5b..61262a7a5c3a 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1020,13 +1020,23 @@ out:
return ret;
}
+static int socfpga_is_a10(void)
+{
+ return of_machine_is_compatible("altr,socfpga-arria10");
+}
+
static int validate_parent_available(struct device_node *np);
static const struct of_device_id altr_edac_a10_device_of_match[];
static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
{
int irq;
- struct device_node *child, *np = of_find_compatible_node(NULL, NULL,
- "altr,socfpga-a10-ecc-manager");
+ struct device_node *child, *np;
+
+ if (!socfpga_is_a10())
+ return -ENODEV;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "altr,socfpga-a10-ecc-manager");
if (!np) {
edac_printk(KERN_ERR, EDAC_DEVICE, "ECC Manager not found\n");
return -ENODEV;
@@ -1542,8 +1552,12 @@ static const struct edac_device_prv_data a10_sdmmceccb_data = {
static int __init socfpga_init_sdmmc_ecc(void)
{
int rc = -ENODEV;
- struct device_node *child = of_find_compatible_node(NULL, NULL,
- "altr,socfpga-sdmmc-ecc");
+ struct device_node *child;
+
+ if (!socfpga_is_a10())
+ return -ENODEV;
+
+ child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
if (!child) {
edac_printk(KERN_WARNING, EDAC_DEVICE, "SDMMC node not found\n");
return -ENODEV;
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index cb9b8577acbc..61c19b81ed81 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -759,7 +759,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
/* Non-ECC RAM? */
printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
- goto err2;
+ goto err;
}
edac_dbg(3, "init mci\n");
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 8263429e21b8..79a48c37fb35 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -59,7 +59,10 @@ bool psci_tos_resident_on(int cpu)
return cpu == resident_cpu;
}
-struct psci_operations psci_ops;
+struct psci_operations psci_ops = {
+ .conduit = PSCI_CONDUIT_NONE,
+ .smccc_version = SMCCC_VERSION_1_0,
+};
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -210,6 +213,22 @@ static unsigned long psci_migrate_info_up_cpu(void)
0, 0, 0);
}
+static void set_conduit(enum psci_conduit conduit)
+{
+ switch (conduit) {
+ case PSCI_CONDUIT_HVC:
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ break;
+ case PSCI_CONDUIT_SMC:
+ invoke_psci_fn = __invoke_psci_fn_smc;
+ break;
+ default:
+ WARN(1, "Unexpected PSCI conduit %d\n", conduit);
+ }
+
+ psci_ops.conduit = conduit;
+}
+
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
@@ -222,9 +241,9 @@ static int get_set_conduit_method(struct device_node *np)
}
if (!strcmp("hvc", method)) {
- invoke_psci_fn = __invoke_psci_fn_hvc;
+ set_conduit(PSCI_CONDUIT_HVC);
} else if (!strcmp("smc", method)) {
- invoke_psci_fn = __invoke_psci_fn_smc;
+ set_conduit(PSCI_CONDUIT_SMC);
} else {
pr_warn("invalid \"method\" property: %s\n", method);
return -EINVAL;
@@ -493,9 +512,36 @@ static void __init psci_init_migrate(void)
pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
}
+static void __init psci_init_smccc(void)
+{
+ u32 ver = ARM_SMCCC_VERSION_1_0;
+ int feature;
+
+ feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
+
+ if (feature != PSCI_RET_NOT_SUPPORTED) {
+ u32 ret;
+ ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
+ if (ret == ARM_SMCCC_VERSION_1_1) {
+ psci_ops.smccc_version = SMCCC_VERSION_1_1;
+ ver = ret;
+ }
+ }
+
+ /*
+ * Conveniently, the SMCCC and PSCI versions are encoded the
+ * same way. No, this isn't accidental.
+ */
+ pr_info("SMC Calling Convention v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+
+}
+
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_ops.get_version = psci_get_version;
+
psci_function_id[PSCI_FN_CPU_SUSPEND] =
PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
psci_ops.cpu_suspend = psci_cpu_suspend;
@@ -539,6 +585,7 @@ static int __init psci_probe(void)
psci_init_migrate();
if (PSCI_VERSION_MAJOR(ver) >= 1) {
+ psci_init_smccc();
psci_init_cpu_suspend();
psci_init_system_suspend();
}
@@ -652,9 +699,9 @@ int __init psci_acpi_init(void)
pr_info("probing for conduit method from ACPI.\n");
if (acpi_psci_use_hvc())
- invoke_psci_fn = __invoke_psci_fn_hvc;
+ set_conduit(PSCI_CONDUIT_HVC);
else
- invoke_psci_fn = __invoke_psci_fn_smc;
+ set_conduit(PSCI_CONDUIT_SMC);
return psci_probe();
}
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 03a5925a423c..a9daf7121e6e 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -256,7 +256,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
if (set)
reg |= bit;
else
- reg &= bit;
+ reg &= ~bit;
iowrite32(reg, addr);
spin_unlock_irqrestore(&gpio->lock, flags);
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 7c446d118cd6..1d87b0718d3a 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -90,8 +90,18 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
{
int reg;
- if (gpio == 94)
- return GPIOPANELCTL;
+ if (gpio >= CRYSTALCOVE_GPIO_NUM) {
+ /*
+ * Virtual GPIO called from ACPI; for now we only support
+ * the panel ctl.
+ */
+ switch (gpio) {
+ case 0x5e:
+ return GPIOPANELCTL;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
if (reg_type == CTRL_IN) {
if (gpio < 8)
@@ -130,36 +140,36 @@ static void crystalcove_update_irq_ctrl(struct crystalcove_gpio *cg, int gpio)
static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+ int reg = to_reg(gpio, CTRL_OUT);
- if (gpio > CRYSTALCOVE_VGPIO_NUM)
+ if (reg < 0)
return 0;
- return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
- CTLO_INPUT_SET);
+ return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
}
static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio,
int value)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+ int reg = to_reg(gpio, CTRL_OUT);
- if (gpio > CRYSTALCOVE_VGPIO_NUM)
+ if (reg < 0)
return 0;
- return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
- CTLO_OUTPUT_SET | value);
+ return regmap_write(cg->regmap, reg, CTLO_OUTPUT_SET | value);
}
static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
- int ret;
unsigned int val;
+ int ret, reg = to_reg(gpio, CTRL_IN);
- if (gpio > CRYSTALCOVE_VGPIO_NUM)
+ if (reg < 0)
return 0;
- ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val);
+ ret = regmap_read(cg->regmap, reg, &val);
if (ret)
return ret;
@@ -170,14 +180,15 @@ static void crystalcove_gpio_set(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+ int reg = to_reg(gpio, CTRL_OUT);
- if (gpio > CRYSTALCOVE_VGPIO_NUM)
+ if (reg < 0)
return;
if (value)
- regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1);
+ regmap_update_bits(cg->regmap, reg, 1, 1);
else
- regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
+ regmap_update_bits(cg->regmap, reg, 1, 0);
}
static int crystalcove_irq_type(struct irq_data *data, unsigned type)
@@ -185,6 +196,9 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned type)
struct crystalcove_gpio *cg =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
+ return 0;
+
switch (type) {
case IRQ_TYPE_NONE:
cg->intcnt_value = CTLI_INTCNT_DIS;
@@ -235,8 +249,10 @@ static void crystalcove_irq_unmask(struct irq_data *data)
struct crystalcove_gpio *cg =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- cg->set_irq_mask = false;
- cg->update |= UPDATE_IRQ_MASK;
+ if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
+ cg->set_irq_mask = false;
+ cg->update |= UPDATE_IRQ_MASK;
+ }
}
static void crystalcove_irq_mask(struct irq_data *data)
@@ -244,8 +260,10 @@ static void crystalcove_irq_mask(struct irq_data *data)
struct crystalcove_gpio *cg =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- cg->set_irq_mask = true;
- cg->update |= UPDATE_IRQ_MASK;
+ if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
+ cg->set_irq_mask = true;
+ cg->update |= UPDATE_IRQ_MASK;
+ }
}
static struct irq_chip crystalcove_irqchip = {
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index d0ddba7a9d08..5ef4980f3f14 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -51,6 +51,8 @@
#define GROUP1_NR_IRQS 6
#define IRQ_MASK_BASE 0x4e19
#define IRQ_STATUS_BASE 0x4e0b
+#define GPIO_IRQ0_MASK GENMASK(6, 0)
+#define GPIO_IRQ1_MASK GENMASK(5, 0)
#define UPDATE_IRQ_TYPE BIT(0)
#define UPDATE_IRQ_MASK BIT(1)
@@ -310,7 +312,7 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
return IRQ_NONE;
}
- pending = p[0] | (p[1] << 8);
+ pending = (p[0] & GPIO_IRQ0_MASK) | ((p[1] & GPIO_IRQ1_MASK) << 7);
if (!pending)
return IRQ_NONE;
@@ -318,7 +320,7 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
while (pending) {
/* One iteration is for all pending bits */
for_each_set_bit(gpio, (const unsigned long *)&pending,
- GROUP0_NR_IRQS) {
+ WCOVE_GPIO_NUM) {
offset = (gpio > GROUP0_NR_IRQS) ? 1 : 0;
mask = (offset == 1) ? BIT(gpio - GROUP0_NR_IRQS) :
BIT(gpio);
@@ -334,7 +336,7 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
break;
}
- pending = p[0] | (p[1] << 8);
+ pending = (p[0] & GPIO_IRQ0_MASK) | ((p[1] & GPIO_IRQ1_MASK) << 7);
}
return IRQ_HANDLED;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f3c3680963b9..56b24198741c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -425,7 +425,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
struct gpiohandle_request handlereq;
struct linehandle_state *lh;
struct file *file;
- int fd, i, ret;
+ int fd, i, count = 0, ret;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
return -EFAULT;
@@ -471,6 +471,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_descs;
lh->descs[i] = desc;
+ count = i;
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -537,7 +538,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
out_put_unused_fd:
put_unused_fd(fd);
out_free_descs:
- for (; i >= 0; i--)
+ for (i = 0; i < count; i++)
gpiod_free(lh->descs[i]);
kfree(lh->label);
out_free_lh:
@@ -794,7 +795,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
desc = &gdev->descs[offset];
ret = gpiod_request(desc, le->label);
if (ret)
- goto out_free_desc;
+ goto out_free_label;
le->desc = desc;
le->eflags = eflags;
@@ -3231,7 +3232,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}
- status = gpiod_request(desc, con_id);
+ /* If a connection label was passed use that, else use the device name as label */
+ status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
if (status < 0)
return ERR_PTR(status);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 5796539a0bcb..648ecf69bad5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
size_t size;
u32 retry = 3;
+ if (amdgpu_acpi_pcie_notify_device_ready(adev))
+ return -EINVAL;
+
/* Get the device handle */
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 6c343a933182..0217f5d6ecb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -14,6 +14,16 @@
#include "amd_acpi.h"
+#define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0)
+
+struct amdgpu_px_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 px_quirk_flags;
+};
+
struct amdgpu_atpx_functions {
bool px_params;
bool power_cntl;
@@ -35,6 +45,7 @@ struct amdgpu_atpx {
static struct amdgpu_atpx_priv {
bool atpx_detected;
bool bridge_pm_usable;
+ unsigned int quirks;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle other_handle;
@@ -205,13 +216,19 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
- printk("ATPX Hybrid Graphics\n");
- /*
- * Disable legacy PM methods only when pcie port PM is usable,
- * otherwise the device might fail to power off or power on.
- */
- atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
- atpx->is_hybrid = true;
+ if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) {
+ printk("ATPX Hybrid Graphics, forcing to ATPX\n");
+ atpx->functions.power_cntl = true;
+ atpx->is_hybrid = false;
+ } else {
+ printk("ATPX Hybrid Graphics\n");
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
+ atpx->is_hybrid = true;
+ }
}
atpx->dgpu_req_power_for_displays = false;
@@ -547,6 +564,32 @@ static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
.get_client_id = amdgpu_atpx_get_client_id,
};
+static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ /* HG _PR3 doesn't seem to work on this A+A weston board */
+ { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0, 0, 0, 0, 0 },
+};
+
+static void amdgpu_atpx_get_quirks(struct pci_dev *pdev)
+{
+ const struct amdgpu_px_quirk *p = amdgpu_px_quirk_list;
+
+ /* Apply PX quirks */
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device) {
+ amdgpu_atpx_priv.quirks |= p->px_quirk_flags;
+ break;
+ }
+ ++p;
+ }
+}
+
/**
* amdgpu_atpx_detect - detect whether we have PX
*
@@ -570,6 +613,7 @@ static bool amdgpu_atpx_detect(void)
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
@@ -579,6 +623,7 @@ static bool amdgpu_atpx_detect(void)
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
}
if (has_atpx && vga_count == 2) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index c02db01f6583..fe011c7ec70a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -201,8 +201,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
for (i = 0; i < list->num_entries; i++) {
unsigned priority = list->array[i].priority;
- list_add_tail(&list->array[i].tv.head,
- &bucket[priority]);
+ if (!list->array[i].robj->parent)
+ list_add_tail(&list->array[i].tv.head,
+ &bucket[priority]);
+
list->array[i].user_pages = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 086aa5c9c634..e9311eb7b8d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- int saved_dpms = connector->dpms;
- /* Only turn off the display if it's physically disconnected */
- if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
- /* Don't try to start link training before we
- * have the dpcd */
- if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
- return;
-
- /* set it to OFF so that drm_helper_connector_dpms()
- * won't return immediately since the current state
- * is ON at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- connector->dpms = saved_dpms;
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+ amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+ /* Don't start link training before we have the DPCD */
+ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+ return;
+
+ /* Turn the connector off and back on immediately, which
+ * will trigger link training
+ */
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
@@ -739,9 +732,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (encoder) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -760,8 +755,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
amdgpu_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -871,9 +870,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = amdgpu_connector_best_single_encoder(connector);
if (!encoder)
@@ -927,8 +928,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -991,9 +994,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1118,8 +1123,10 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1362,9 +1369,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1432,8 +1441,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index cb505f66d3aa..c801624f33bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -519,7 +519,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
- if (p->uf_entry.robj)
+ if (p->uf_entry.robj && !p->uf_entry.robj->parent)
list_add(&p->uf_entry.tv.head, &p->validated);
if (need_mmap_lock)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ce9797b6f9c7..d0c3e56ece74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1678,8 +1678,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* ignore it */
vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
- if (amdgpu_runtime_pm == 1)
- runtime = true;
if (amdgpu_device_is_px(ddev))
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
@@ -2239,7 +2237,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring)
+ if (!ring || !ring->sched.thread)
continue;
kthread_park(ring->sched.thread);
amd_sched_hw_job_reset(&ring->sched);
@@ -2328,7 +2326,8 @@ retry:
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring)
+
+ if (!ring || !ring->sched.thread)
continue;
amd_sched_job_recovery(&ring->sched);
@@ -2337,7 +2336,7 @@ retry:
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (adev->rings[i]) {
+ if (adev->rings[i] && adev->rings[i]->sched.thread) {
kthread_unpark(adev->rings[i]->sched.thread);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 083e2b429872..15a2d8f3725d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -533,6 +533,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
+ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
+
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a7ea9a3b454e..d5e4748e3300 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
if (robj) {
- if (robj->gem_base.import_attach)
- drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
amdgpu_mn_unregister(robj);
amdgpu_bo_unref(&robj);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index f3efb1c5dae9..5afe72778518 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -94,6 +94,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+ if (bo->gem_base.import_attach)
+ drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
if (!list_empty(&bo->shadow_list)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e3281cacc586..5caf517eec9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -273,12 +273,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (adev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < adev->uvd.max_handles; ++i)
- if (atomic_read(&adev->uvd.handles[i]))
- break;
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+ if (atomic_read(&adev->uvd.handles[i]))
+ break;
- if (i == AMDGPU_MAX_UVD_HANDLES)
- return 0;
+ if (i == adev->uvd.max_handles)
+ return 0;
+ }
cancel_delayed_work_sync(&adev->uvd.idle_work);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 71116da9e782..e040a896179c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4475,34 +4475,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
case CHIP_KAVERI:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 4;
- if ((adev->pdev->device == 0x1304) ||
- (adev->pdev->device == 0x1305) ||
- (adev->pdev->device == 0x130C) ||
- (adev->pdev->device == 0x130F) ||
- (adev->pdev->device == 0x1310) ||
- (adev->pdev->device == 0x1311) ||
- (adev->pdev->device == 0x131C)) {
- adev->gfx.config.max_cu_per_sh = 8;
- adev->gfx.config.max_backends_per_se = 2;
- } else if ((adev->pdev->device == 0x1309) ||
- (adev->pdev->device == 0x130A) ||
- (adev->pdev->device == 0x130D) ||
- (adev->pdev->device == 0x1313) ||
- (adev->pdev->device == 0x131D)) {
- adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
- } else if ((adev->pdev->device == 0x1306) ||
- (adev->pdev->device == 0x1307) ||
- (adev->pdev->device == 0x130B) ||
- (adev->pdev->device == 0x130E) ||
- (adev->pdev->device == 0x1315) ||
- (adev->pdev->device == 0x131B)) {
- adev->gfx.config.max_cu_per_sh = 4;
- adev->gfx.config.max_backends_per_se = 1;
- } else {
- adev->gfx.config.max_cu_per_sh = 3;
- adev->gfx.config.max_backends_per_se = 1;
- }
+ adev->gfx.config.max_cu_per_sh = 8;
+ adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_texture_channel_caches = 4;
adev->gfx.config.max_gprs = 256;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index a88d365be4c5..564362e8b486 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1484,10 +1484,11 @@ static const u32 sgpr_init_compute_shader[] =
static const u32 vgpr_init_regs[] =
{
mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
- mmCOMPUTE_RESOURCE_LIMITS, 0,
+ mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
mmCOMPUTE_NUM_THREAD_X, 256*4,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
+ mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1504,10 +1505,11 @@ static const u32 vgpr_init_regs[] =
static const u32 sgpr1_init_regs[] =
{
mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
- mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
+ mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
mmCOMPUTE_NUM_THREAD_X, 256*5,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
+ mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1528,6 +1530,7 @@ static const u32 sgpr2_init_regs[] =
mmCOMPUTE_NUM_THREAD_X, 256*5,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
+ mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 4cb347e88cf0..3fa8320e49c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3507,6 +3507,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
max_sclk = 75000;
max_mclk = 80000;
}
+ if ((adev->pdev->revision == 0xC3) ||
+ (adev->pdev->device == 0x6665)) {
+ max_sclk = 60000;
+ max_mclk = 80000;
+ }
} else if (adev->asic_type == CHIP_OLAND) {
if ((adev->pdev->revision == 0xC7) ||
(adev->pdev->revision == 0x80) ||
@@ -6444,9 +6449,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
{
u32 lane_width;
u32 new_lane_width =
- (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
- (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
amdgpu_set_pcie_lanes(adev, new_lane_width);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index ef7c8de7060e..171480bb95d0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -317,7 +317,8 @@ static struct kfd_process *create_process(const struct task_struct *thread)
/* init process apertures*/
process->is_32bit_user_mode = in_compat_syscall();
- if (kfd_init_apertures(process) != 0)
+ err = kfd_init_apertures(process);
+ if (err != 0)
goto err_init_apretures;
return process;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 1e5064749959..8c6e47c5507f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -519,11 +519,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
return ret;
}
+static void kfd_topology_kobj_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
static const struct sysfs_ops sysprops_ops = {
.show = sysprops_show,
};
static struct kobj_type sysprops_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &sysprops_ops,
};
@@ -559,6 +565,7 @@ static const struct sysfs_ops iolink_ops = {
};
static struct kobj_type iolink_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &iolink_ops,
};
@@ -586,6 +593,7 @@ static const struct sysfs_ops mem_ops = {
};
static struct kobj_type mem_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &mem_ops,
};
@@ -625,6 +633,7 @@ static const struct sysfs_ops cache_ops = {
};
static struct kobj_type cache_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &cache_ops,
};
@@ -747,6 +756,7 @@ static const struct sysfs_ops node_ops = {
};
static struct kobj_type node_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &node_ops,
};
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index afec232185a7..cfd80bc510b6 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -53,7 +53,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
}
drm_mode_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ return ret;
fallback:
/*
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index a7b2a751f6fe..cdb53586c8fe 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -322,19 +322,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
{
uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
ssize_t ret;
+ int retry;
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 0;
- ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
- &tmds_oen, sizeof(tmds_oen));
- if (ret) {
- DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
- enable ? "enable" : "disable");
- return ret;
+ /*
+ * LSPCON adapters in low-power state may ignore the first write, so
+ * read back and verify the written value a few times.
+ */
+ for (retry = 0; retry < 3; retry++) {
+ uint8_t tmp;
+
+ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmds_oen, sizeof(tmds_oen));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
+ enable ? "enable" : "disable",
+ retry + 1);
+ return ret;
+ }
+
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmp, sizeof(tmp));
+ if (ret) {
+ DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
+ enable ? "enabling" : "disabling",
+ retry + 1);
+ return ret;
+ }
+
+ if (tmp == tmds_oen)
+ return 0;
}
- return 0;
+ DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
+ enable ? "enabling" : "disabling");
+
+ return -EIO;
}
EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0151ed2de770..6b31e0474271 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -107,6 +107,9 @@ static const struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+ /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
+ { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -3344,8 +3347,7 @@ EXPORT_SYMBOL(drm_edid_get_monitor_name);
* @edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
- * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
- * fill in.
+ * HDCP and Port_ID ELD fields are left for the graphics driver to fill in.
*/
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
@@ -3423,6 +3425,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
}
eld[5] |= total_sad_count << 4;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP;
+ else
+ eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI;
+
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 48a6167f5e7b..00c815a7c414 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1202,9 +1202,9 @@ static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
if (atomic_dec_and_test(&vblank->refcount)) {
if (drm_vblank_offdelay == 0)
return;
- else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
+ else if (drm_vblank_offdelay < 0)
vblank_disable_fn((unsigned long)vblank);
- else
+ else if (!dev->vblank_disable_immediate)
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
@@ -1819,6 +1819,16 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
wake_up(&vblank->queue);
drm_handle_vblank_events(dev, pipe);
+ /* With instant-off, we defer disabling the interrupt until after
+ * we finish processing the following vblank. The disable has to
+ * be last (after drm_handle_vblank_events) so that the timestamp
+ * is always accurate.
+ */
+ if (dev->vblank_disable_immediate &&
+ drm_vblank_offdelay > 0 &&
+ !atomic_read(&vblank->refcount))
+ vblank_disable_fn((unsigned long)vblank);
+
spin_unlock_irqrestore(&dev->event_lock, irqflags);
return true;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 276474d13763..d7822bef1986 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -461,6 +461,26 @@ out:
}
/**
+ * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
+ *
+ * Determine if %current task is an output poll worker. This can be used
+ * to select distinct code paths for output polling versus other contexts.
+ *
+ * One use case is to avoid a deadlock between the output poll worker and
+ * the autosuspend worker wherein the latter waits for polling to finish
+ * upon calling drm_kms_helper_poll_disable(), while the former waits for
+ * runtime suspend to finish upon calling pm_runtime_get_sync() in a
+ * connector ->detect hook.
+ */
+bool drm_kms_helper_is_poll_worker(void)
+{
+ struct work_struct *work = current_work();
+
+ return work && work->func == output_poll_execute;
+}
+EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
+
+/**
* drm_kms_helper_poll_disable - disable output polling
* @dev: drm_device
*
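For reference, a minimal sketch of how a connector ->detect hook is expected to use this new helper, mirroring the amdgpu, radeon and nouveau call sites elsewhere in this patch. This is a fragment, not the actual driver code; probe_hardware() stands in for the driver-specific detection logic:

static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
	enum drm_connector_status status = connector_status_disconnected;
	int r;

	/* The poll worker only runs while the device is runtime active,
	 * so taking a runtime PM reference there is unnecessary and can
	 * deadlock against the autosuspend worker.
	 */
	if (!drm_kms_helper_is_poll_worker()) {
		r = pm_runtime_get_sync(connector->dev->dev);
		if (r < 0)
			return connector_status_disconnected;
	}

	status = probe_hardware(connector);	/* placeholder, driver-specific */

	if (!drm_kms_helper_is_poll_worker()) {
		pm_runtime_mark_last_busy(connector->dev->dev);
		pm_runtime_put_autosuspend(connector->dev->dev);
	}

	return status;
}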
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7513e7678263..bae62cf934cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1703,6 +1703,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
if (IS_BROXTON(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
+ else
+ intel_display_set_init_power(dev_priv, true);
enable_rpm_wakeref_asserts(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 36a665f0e5c9..e23748cca0c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3681,7 +3681,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
+ u32 val, int timeout_us);
+#define sandybridge_pcode_write(dev_priv, mbox, val) \
+ sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
+
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c0a53bf2e952..6c9f9c85e822 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6012,8 +6012,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
/* Inform power controller of upcoming frequency change */
mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000);
+ ret = sandybridge_pcode_write_timeout(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 2000);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
@@ -6044,8 +6044,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
I915_WRITE(CDCLK_CTL, val);
mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- DIV_ROUND_UP(cdclk, 25000));
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ DIV_ROUND_UP(cdclk, 25000), 2000);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 13c306173f27..0c935dede9f4 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1452,12 +1452,20 @@ intel_hdmi_set_edid(struct drm_connector *connector)
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct edid *edid;
bool connected = false;
+ struct i2c_adapter *i2c;
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv,
- intel_hdmi->ddc_bus));
+ i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+ edid = drm_get_edid(connector, i2c);
+
+ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ intel_gmbus_force_bit(i2c, true);
+ edid = drm_get_edid(connector, i2c);
+ intel_gmbus_force_bit(i2c, false);
+ }
intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e1d47d51ea47..3517c0ed984a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -321,7 +321,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
+
+ if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 49de4760cc16..05427d292457 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7913,8 +7913,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
return 0;
}
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
- u32 mbox, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+ u32 mbox, u32 val, int timeout_us)
{
int status;
@@ -7935,7 +7935,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
if (intel_wait_for_register_fw(dev_priv,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- 500)) {
+ timeout_us)) {
DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b6ac27e31929..7145127513c4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -91,14 +91,17 @@ static struct page **get_pages(struct drm_gem_object *obj)
return p;
}
+ msm_obj->pages = p;
+
msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
if (IS_ERR(msm_obj->sgt)) {
+ void *ptr = ERR_CAST(msm_obj->sgt);
+
dev_err(dev->dev, "failed to allocate sgt\n");
- return ERR_CAST(msm_obj->sgt);
+ msm_obj->sgt = NULL;
+ return ptr;
}
- msm_obj->pages = p;
-
/* For non-cached buffers, ensure the new pages are clean
* because display controller, GPU, etc. are not coherent:
*/
@@ -121,7 +124,10 @@ static void put_pages(struct drm_gem_object *obj)
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
- sg_free_table(msm_obj->sgt);
+
+ if (msm_obj->sgt)
+ sg_free_table(msm_obj->sgt);
+
kfree(msm_obj->sgt);
if (use_pages(obj))
@@ -764,6 +770,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
unsigned sz;
bool use_vram = false;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
case MSM_BO_CACHED:
@@ -857,7 +865,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
size = PAGE_ALIGN(dmabuf->size);
+ /* Take mutex so we can modify the inactive list in msm_gem_new_impl */
+ mutex_lock(&dev->struct_mutex);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+ mutex_unlock(&dev->struct_mutex);
+
if (ret)
goto fail;
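The get_pages()/put_pages() hunks above follow a common pattern: record each resource in the object as soon as it exists, and make the teardown path tolerate fields that were never set up, so cleanup is safe even when construction fails halfway. A small stand-alone sketch of that idea, with purely illustrative names (not msm code):

#include <stdlib.h>

struct buffer {
	void *pages;	/* first resource */
	void *sgt;	/* second resource, derived from the first */
};

/* Teardown tolerates partially constructed objects: free(NULL) is a no-op. */
static void buffer_destroy(struct buffer *buf)
{
	free(buf->sgt);
	free(buf->pages);
	free(buf);
}

static struct buffer *buffer_create(size_t npages)
{
	struct buffer *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return NULL;

	/* Each resource is stored in the object as soon as it is created,
	 * so buffer_destroy() can always run, whatever step fails next.
	 */
	buf->pages = calloc(npages, 4096);
	if (!buf->pages)
		goto fail;

	buf->sgt = malloc(npages * sizeof(long));
	if (!buf->sgt)
		goto fail;

	return buf;

fail:
	buffer_destroy(buf);
	return NULL;
}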
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c1084088f9e4..56c288f78d8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -271,9 +271,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0 && ret != -EACCES)
- return conn_status;
+ /* Outputs are only polled while runtime active, so acquiring a
+ * runtime PM ref here is unnecessary (and would deadlock upon
+ * runtime suspend because it waits for polling to finish).
+ */
+ if (!drm_kms_helper_is_poll_worker()) {
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return conn_status;
+ }
nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
@@ -348,8 +354,10 @@ detect_analog:
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return conn_status;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2c2b86d68129..6526a3366087 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -106,7 +106,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
};
struct nouveau_display *disp = nouveau_display(crtc->dev);
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
- int ret, retry = 1;
+ int ret, retry = 20;
do {
ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args));
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index a4cb82495cee..245c946ea661 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -136,6 +136,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
return ret;
pci->irq = pdev->irq;
+
+ /* Ensure MSI interrupts are armed, for the case where there are
+ * already interrupts pending (for whatever reason) at load time.
+ */
+ if (pci->msi)
+ pci->func->msi_rearm(pci);
+
return ret;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index e859b3f893f7..6112c32b994f 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -456,6 +456,8 @@ static int td028ttec1_panel_remove(struct spi_device *spi)
}
static const struct of_device_id td028ttec1_of_match[] = {
+ { .compatible = "omapdss,tpo,td028ttec1", },
+ /* keep to not break older DTB */
{ .compatible = "omapdss,toppoly,td028ttec1", },
{},
};
@@ -475,6 +477,7 @@ static struct spi_driver td028ttec1_spi_driver = {
module_spi_driver(td028ttec1_spi_driver);
+MODULE_ALIAS("spi:tpo,td028ttec1");
MODULE_ALIAS("spi:toppoly,td028ttec1");
MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 4b83e9eeab06..7def04049498 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -298,7 +298,12 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
msecs_to_jiffies(100))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
+ goto cleanup;
}
+
+ /* Check the engine status before continuing */
+ ret = wait_status(engine, DMM_PATSTATUS_READY |
+ DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
}
cleanup:
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 505dee0db973..ca91651be3d4 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -195,7 +195,7 @@ static void evict_entry(struct drm_gem_object *obj,
size_t size = PAGE_SIZE * n;
loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
if (m > 1) {
int i;
@@ -442,7 +442,7 @@ static int fault_2d(struct drm_gem_object *obj,
* into account in some of the math, so figure out virtual stride
* in pages
*/
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = ((unsigned long)vmf->virtual_address -
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 2cd879a4ae15..cdbb6e625f05 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -387,9 +387,11 @@ static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
int qxl_fbdev_init(struct qxl_device *qdev)
{
+ int ret = 0;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct qxl_fbdev *qfbdev;
int bpp_sel = 32; /* TODO: parameter from somewhere? */
- int ret;
qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
if (!qfbdev)
@@ -423,6 +425,8 @@ fini:
drm_fb_helper_fini(&qfbdev->helper);
free:
kfree(qfbdev);
+#endif
+
return ret;
}
@@ -438,6 +442,9 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
{
+ if (!qdev->mode_info.qfbdev)
+ return;
+
drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index edee6a5f4da9..b99f3e59011c 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3244,35 +3244,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
case CHIP_KAVERI:
rdev->config.cik.max_shader_engines = 1;
rdev->config.cik.max_tile_pipes = 4;
- if ((rdev->pdev->device == 0x1304) ||
- (rdev->pdev->device == 0x1305) ||
- (rdev->pdev->device == 0x130C) ||
- (rdev->pdev->device == 0x130F) ||
- (rdev->pdev->device == 0x1310) ||
- (rdev->pdev->device == 0x1311) ||
- (rdev->pdev->device == 0x131C)) {
- rdev->config.cik.max_cu_per_sh = 8;
- rdev->config.cik.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x1309) ||
- (rdev->pdev->device == 0x130A) ||
- (rdev->pdev->device == 0x130D) ||
- (rdev->pdev->device == 0x1313) ||
- (rdev->pdev->device == 0x131D)) {
- rdev->config.cik.max_cu_per_sh = 6;
- rdev->config.cik.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x1306) ||
- (rdev->pdev->device == 0x1307) ||
- (rdev->pdev->device == 0x130B) ||
- (rdev->pdev->device == 0x130E) ||
- (rdev->pdev->device == 0x1315) ||
- (rdev->pdev->device == 0x1318) ||
- (rdev->pdev->device == 0x131B)) {
- rdev->config.cik.max_cu_per_sh = 4;
- rdev->config.cik.max_backends_per_se = 1;
- } else {
- rdev->config.cik.max_cu_per_sh = 3;
- rdev->config.cik.max_backends_per_se = 1;
- }
+ rdev->config.cik.max_cu_per_sh = 8;
+ rdev->config.cik.max_backends_per_se = 2;
rdev->config.cik.max_sh_per_se = 1;
rdev->config.cik.max_texture_channel_caches = 4;
rdev->config.cik.max_gprs = 256;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 27affbde058c..f416f5c2e8e9 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- int saved_dpms = connector->dpms;
- /* Only turn off the display if it's physically disconnected */
- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- } else if (radeon_dp_needs_link_train(radeon_connector)) {
- /* Don't try to start link training before we
- * have the dpcd */
- if (!radeon_dp_getdpcd(radeon_connector))
- return;
-
- /* set it to OFF so that drm_helper_connector_dpms()
- * won't return immediately since the current state
- * is ON at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- connector->dpms = saved_dpms;
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+ radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+ radeon_dp_needs_link_train(radeon_connector)) {
+ /* Don't start link training before we have the DPCD */
+ if (!radeon_dp_getdpcd(radeon_connector))
+ return;
+
+ /* Turn the connector off and back on immediately, which
+ * will trigger link training
+ */
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
@@ -897,9 +890,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -922,8 +917,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -1037,9 +1036,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1106,8 +1107,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1171,9 +1174,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->dac_load_detect)
return ret;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1185,8 +1190,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -1249,9 +1258,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (radeon_connector->detected_hpd_without_ddc) {
force = true;
@@ -1434,8 +1445,10 @@ out:
}
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1686,9 +1699,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dig_connector->is_mst)
return connector_status_disconnected;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1775,8 +1790,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b6d7fd964cbc..64f4c2251331 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1352,6 +1352,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
+ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
+
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 41b72ce6613f..83e1345db9e2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -238,9 +238,10 @@ int radeon_bo_create(struct radeon_device *rdev,
* may be slow
* See https://bugs.freedesktop.org/show_bug.cgi?id=88758
*/
-
+#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
thanks to write-combining
+#endif
if (bo->flags & RADEON_GEM_GTT_WC)
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 574ab0016a57..b82ef5ed727c 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5969,9 +5969,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
{
u32 lane_width;
u32 new_lane_width =
- (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
- (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
radeon_set_pcie_lanes(rdev, new_lane_width);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 3322b157106d..1c4d95dea887 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -512,6 +512,13 @@ static void rcar_du_crtc_disable(struct drm_crtc *crtc)
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
rcrtc->outputs = 0;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c7eba305c488..32d87c6035c9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -503,7 +503,7 @@ static int vop_enable(struct drm_crtc *crtc)
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
- goto err_put_pm_runtime;
+ return ret;
}
ret = clk_enable(vop->hclk);
@@ -1348,10 +1348,16 @@ static int vop_initial(struct vop *vop)
return PTR_ERR(vop->dclk);
}
+ ret = pm_runtime_get_sync(vop->dev);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
+ return ret;
+ }
+
ret = clk_prepare(vop->dclk);
if (ret < 0) {
dev_err(vop->dev, "failed to prepare dclk\n");
- return ret;
+ goto err_put_pm_runtime;
}
/* Enable both the hclk and aclk to setup the vop */
@@ -1380,6 +1386,9 @@ static int vop_initial(struct vop *vop)
usleep_range(10, 20);
reset_control_deassert(ahb_rst);
+ VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
+ VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);
+
memcpy(vop->regsbak, vop->regs, vop->len);
for (i = 0; i < vop_data->table_size; i++)
@@ -1411,6 +1420,8 @@ static int vop_initial(struct vop *vop)
vop->is_enabled = false;
+ pm_runtime_put_sync(vop->dev);
+
return 0;
err_disable_aclk:
@@ -1419,6 +1430,8 @@ err_disable_hclk:
clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
clk_unprepare(vop->dclk);
+err_put_pm_runtime:
+ pm_runtime_put_sync(vop->dev);
return ret;
}
@@ -1519,12 +1532,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (!vop->regsbak)
return -ENOMEM;
- ret = vop_initial(vop);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
- return ret;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "cannot find irq for vop\n");
@@ -1537,24 +1544,31 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
mutex_init(&vop->vsync_mutex);
- ret = devm_request_irq(dev, vop->irq, vop_isr,
- IRQF_SHARED, dev_name(dev), vop);
+ ret = vop_create_crtc(vop);
if (ret)
return ret;
- /* IRQ is initially disabled; it gets enabled in power_on */
- disable_irq(vop->irq);
+ pm_runtime_enable(&pdev->dev);
- ret = vop_create_crtc(vop);
+ ret = vop_initial(vop);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
+ goto err_disable_pm_runtime;
+ }
+
+ ret = devm_request_irq(dev, vop->irq, vop_isr,
+ IRQF_SHARED, dev_name(dev), vop);
if (ret)
- goto err_enable_irq;
+ goto err_disable_pm_runtime;
- pm_runtime_enable(&pdev->dev);
+ /* IRQ is initially disabled; it gets enabled in power_on */
+ disable_irq(vop->irq);
return 0;
-err_enable_irq:
- enable_irq(vop->irq); /* To balance out the disable_irq above */
+err_disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+ vop_destroy_crtc(vop);
return ret;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 4a192210574f..caba0311c86c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
+#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
@@ -136,5 +137,9 @@ struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm)
drm_crtc_helper_add(&scrtc->crtc, &sun4i_crtc_helper_funcs);
+ /* Set crtc.port to output port node of the tcon */
+ scrtc->crtc.port = of_graph_get_port_by_id(drv->tcon->dev->of_node,
+ 1);
+
return scrtc;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 1feec34ca9dd..aad2f4a2a0ef 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -145,7 +145,7 @@ static int sun4i_drv_bind(struct device *dev)
ret = component_bind_all(drm->dev, drm);
if (ret) {
dev_err(drm->dev, "Couldn't bind all pipelines components\n");
- goto free_drm;
+ goto cleanup_mode_config;
}
/* Create our layers */
@@ -153,7 +153,7 @@ static int sun4i_drv_bind(struct device *dev)
if (IS_ERR(drv->layers)) {
dev_err(drm->dev, "Couldn't create the planes\n");
ret = PTR_ERR(drv->layers);
- goto free_drm;
+ goto cleanup_mode_config;
}
/* Create our CRTC */
@@ -161,7 +161,7 @@ static int sun4i_drv_bind(struct device *dev)
if (!drv->crtc) {
dev_err(drm->dev, "Couldn't create the CRTC\n");
ret = -EINVAL;
- goto free_drm;
+ goto cleanup_mode_config;
}
drm->irq_enabled = true;
@@ -173,7 +173,7 @@ static int sun4i_drv_bind(struct device *dev)
if (IS_ERR(drv->fbdev)) {
dev_err(drm->dev, "Couldn't create our framebuffer\n");
ret = PTR_ERR(drv->fbdev);
- goto free_drm;
+ goto cleanup_mode_config;
}
/* Enable connectors polling */
@@ -181,10 +181,16 @@ static int sun4i_drv_bind(struct device *dev)
ret = drm_dev_register(drm, 0);
if (ret)
- goto free_drm;
+ goto finish_poll;
return 0;
+finish_poll:
+ drm_kms_helper_poll_fini(drm);
+ sun4i_framebuffer_free(drm);
+cleanup_mode_config:
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
free_drm:
drm_dev_unref(drm);
return ret;
@@ -206,6 +212,11 @@ static const struct component_master_ops sun4i_drv_master_ops = {
.unbind = sun4i_drv_unbind,
};
+static bool sun4i_drv_node_is_connector(struct device_node *node)
+{
+ return of_device_is_compatible(node, "hdmi-connector");
+}
+
static bool sun4i_drv_node_is_frontend(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
@@ -246,6 +257,13 @@ static int sun4i_drv_add_endpoints(struct device *dev,
!of_device_is_available(node))
return 0;
+ /*
+ * The connectors will be the last nodes in our pipeline, so we
+ * can just bail out.
+ */
+ if (sun4i_drv_node_is_connector(node))
+ return 0;
+
if (!sun4i_drv_node_is_frontend(node)) {
/* Add current component */
DRM_DEBUG_DRIVER("Adding component %s\n",
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c6afb2448655..f2975a1525be 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -336,12 +336,11 @@ static int sun4i_tcon_init_clocks(struct device *dev,
}
}
- return sun4i_dclk_create(dev, tcon);
+ return 0;
}
static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
{
- sun4i_dclk_free(tcon);
clk_disable_unprepare(tcon->clk);
}
@@ -506,22 +505,28 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
return ret;
}
+ ret = sun4i_tcon_init_clocks(dev, tcon);
+ if (ret) {
+ dev_err(dev, "Couldn't init our TCON clocks\n");
+ goto err_assert_reset;
+ }
+
ret = sun4i_tcon_init_regmap(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON regmap\n");
- goto err_assert_reset;
+ goto err_free_clocks;
}
- ret = sun4i_tcon_init_clocks(dev, tcon);
+ ret = sun4i_dclk_create(dev, tcon);
if (ret) {
- dev_err(dev, "Couldn't init our TCON clocks\n");
- goto err_assert_reset;
+ dev_err(dev, "Couldn't create our TCON dot clock\n");
+ goto err_free_clocks;
}
ret = sun4i_tcon_init_irq(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON interrupts\n");
- goto err_free_clocks;
+ goto err_free_dotclock;
}
ret = sun4i_rgb_init(drm);
@@ -530,6 +535,8 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
return 0;
+err_free_dotclock:
+ sun4i_dclk_free(tcon);
err_free_clocks:
sun4i_tcon_free_clocks(tcon);
err_assert_reset:
@@ -542,6 +549,7 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
{
struct sun4i_tcon *tcon = dev_get_drvdata(dev);
+ sun4i_dclk_free(tcon);
sun4i_tcon_free_clocks(tcon);
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
index f57c0d62c76a..19d8dc3cdd40 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -124,7 +124,7 @@ static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data)
struct tilcdc_drm_private *priv = dev->dev_private;
volatile void __iomem *addr = priv->mmio + reg;
-#ifdef iowrite64
+#if defined(iowrite64) && !defined(iowrite64_is_nonatomic)
iowrite64(data, addr);
#else
__iowmb();
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d09276ec7e90..52a2a1a75682 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1209,18 +1209,20 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
if (likely(!ret))
ret = ttm_bo_validate(bo, placement, interruptible, false);
- if (!resv) {
+ if (!resv)
ttm_bo_unreserve(bo);
- } else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+ if (unlikely(ret)) {
+ ttm_bo_unref(&bo);
+ return ret;
+ }
+
+ if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
spin_lock(&bo->glob->lru_lock);
ttm_bo_add_to_lru(bo);
spin_unlock(&bo->glob->lru_lock);
}
- if (unlikely(ret))
- ttm_bo_unref(&bo);
-
return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ddd6badd0eee..c756b2b7f5dc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -818,6 +818,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
pr_info("Initializing pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ return -ENOMEM;
ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 611b6b9bb3cb..67ea2ce03a23 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -158,10 +158,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long offset;
unsigned long page, pos;
- if (offset + size > info->fix.smem_len)
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
return -EINVAL;
pos = (unsigned long)info->fix.smem_start + offset;
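The udl_fb_mmap() change above guards against two integer pitfalls: a page offset whose shift would overflow, and an offset-plus-size addition that wraps past the end of the buffer. A stand-alone sketch of the same bounds check, with illustrative names rather than the udl code:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4K pages for the example */

/* Return 0 if [pgoff << PAGE_SHIFT, +size) fits inside a buffer of
 * smem_len bytes, -1 otherwise.  Written so that neither the shift
 * nor the addition can overflow.
 */
static int range_ok(unsigned long pgoff, unsigned long size,
		    unsigned long smem_len)
{
	unsigned long offset;

	if (pgoff > (~0UL >> PAGE_SHIFT))	/* shift would overflow */
		return -1;

	offset = pgoff << PAGE_SHIFT;

	/* "offset + size > smem_len" could wrap; compare against the
	 * remaining space instead.
	 */
	if (offset > smem_len || size > smem_len - offset)
		return -1;

	return 0;
}

int main(void)
{
	printf("%d\n", range_ok(1, 4096, 16384));	/* 0: fits */
	printf("%d\n", range_ok(~0UL, 4096, 16384));	/* -1: shift overflow */
	printf("%d\n", range_ok(3, 8192, 16384));	/* -1: past the end */
	return 0;
}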
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index ec9023bd935b..d53e805d392f 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -80,6 +80,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
if (bo->validated_shader) {
+ kfree(bo->validated_shader->uniform_addr_offsets);
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;
@@ -328,6 +329,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
}
if (bo->validated_shader) {
+ kfree(bo->validated_shader->uniform_addr_offsets);
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index ab3016982466..b608cd463d4e 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -110,8 +110,8 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
&handle);
if (ret) {
- state->bo_count = i - 1;
- goto err;
+ state->bo_count = i;
+ goto err_delete_handle;
}
bo_state[i].handle = handle;
bo_state[i].paddr = vc4_bo->base.paddr;
@@ -123,13 +123,16 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
state->bo_count * sizeof(*bo_state)))
ret = -EFAULT;
- kfree(bo_state);
+err_delete_handle:
+ if (ret) {
+ for (i = 0; i < state->bo_count; i++)
+ drm_gem_handle_delete(file_priv, bo_state[i].handle);
+ }
err_free:
-
vc4_free_hang_state(dev, kernel_state);
+ kfree(bo_state);
-err:
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 881bf489478b..75056553b06c 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -533,7 +533,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* the scl fields here.
*/
if (num_planes == 1) {
- scl0 = vc4_get_scl_field(state, 1);
+ scl0 = vc4_get_scl_field(state, 0);
scl1 = scl0;
} else {
scl0 = vc4_get_scl_field(state, 1);
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 917321ce832f..19a5bde8e490 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -874,6 +874,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
fail:
kfree(validation_state.branch_targets);
if (validated_shader) {
+ kfree(validated_shader->uniform_addr_offsets);
kfree(validated_shader->texture_samples);
kfree(validated_shader);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a745b9d..52436b3c01bb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -324,7 +324,7 @@ retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
spin_lock(&vgdev->ctrlq.qlock);
goto retry;
} else {
@@ -399,7 +399,7 @@ retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->cursorq.qlock);
- wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+ wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
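The virtio-gpu change above fixes the wake-up condition: after -ENOSPC the caller must wait until the queue has at least as many free descriptor slots as the whole request needs, not merely one. A single-threaded sketch of that retry logic with illustrative names (the real driver sleeps on ack_queue instead of calling reclaim() directly):

#include <stdio.h>

struct ring {
	unsigned int num_free;	/* free descriptor slots */
};

/* Stand-in for completed descriptors being reclaimed; two per call. */
static void reclaim(struct ring *vq)
{
	vq->num_free += 2;
}

static int try_add(struct ring *vq, unsigned int needed)
{
	if (vq->num_free < needed)
		return -1;	/* -ENOSPC in the driver */
	vq->num_free -= needed;
	return 0;
}

/* Retry until the ring has room for the whole request.  Waiting for
 * "num_free != 0" would be wrong: a request needing several slots could
 * be woken while only one is free and immediately fail again.
 */
static int add_request(struct ring *vq, unsigned int needed)
{
	while (try_add(vq, needed) < 0)
		reclaim(vq);
	return 0;
}

int main(void)
{
	struct ring vq = { .num_free = 1 };

	add_request(&vq, 3);
	printf("free slots left: %u\n", vq.num_free);
	return 0;
}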
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d2d93959b119..aec6e9eef489 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -433,7 +433,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
set.y = 0;
set.mode = NULL;
set.fb = NULL;
- set.num_connectors = 1;
+ set.num_connectors = 0;
set.connectors = &par->con;
ret = drm_mode_set_config_internal(&set);
if (ret) {
@@ -821,7 +821,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
flush_delayed_work(&par->local_work);
mutex_lock(&par->bo_mutex);
+ drm_modeset_lock_all(vmw_priv->dev);
(void) vmw_fb_kms_detach(par, true, false);
+ drm_modeset_unlock_all(vmw_priv->dev);
mutex_unlock(&par->bo_mutex);
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bf28ccc150df..33ca24ab983e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -27,7 +27,6 @@
#include "vmwgfx_kms.h"
-
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
@@ -1933,9 +1932,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
* Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_resource_prepare.
*/
-void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
{
- vmw_kms_helper_buffer_revert(res->backup);
+ struct vmw_resource *res = ctx->res;
+
+ vmw_kms_helper_buffer_revert(ctx->buf);
+ vmw_dmabuf_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -1952,10 +1954,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
* interrupted by a signal.
*/
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
- bool interruptible)
+ bool interruptible,
+ struct vmw_validation_ctx *ctx)
{
int ret = 0;
+ ctx->buf = NULL;
+ ctx->res = res;
+
if (interruptible)
ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
else
@@ -1974,6 +1980,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
res->dev_priv->has_mob);
if (ret)
goto out_unreserve;
+
+ ctx->buf = vmw_dmabuf_reference(res->backup);
}
ret = vmw_resource_validate(res);
if (ret)
@@ -1981,7 +1989,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
return 0;
out_revert:
- vmw_kms_helper_buffer_revert(res->backup);
+ vmw_kms_helper_buffer_revert(ctx->buf);
out_unreserve:
vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
@@ -1997,13 +2005,16 @@ out_unlock:
* @out_fence: Optional pointer to a fence pointer. If non-NULL, a
* ref-counted fence pointer is returned here.
*/
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
- struct vmw_fence_obj **out_fence)
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
+ struct vmw_fence_obj **out_fence)
{
- if (res->backup || out_fence)
- vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+ struct vmw_resource *res = ctx->res;
+
+ if (ctx->buf || out_fence)
+ vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
+ vmw_dmabuf_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff4803c107bc..2dd05395e98b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -183,6 +183,11 @@ struct vmw_display_unit {
int set_gui_y;
};
+struct vmw_validation_ctx {
+ struct vmw_resource *res;
+ struct vmw_dma_buffer *buf;
+};
+
#define vmw_crtc_to_du(x) \
container_of(x, struct vmw_display_unit, crtc)
#define vmw_connector_to_du(x) \
@@ -233,9 +238,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_vmw_fence_rep __user *
user_fence_rep);
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
- bool interruptible);
-void vmw_kms_helper_resource_revert(struct vmw_resource *res);
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+ bool interruptible,
+ struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
struct vmw_fence_obj **out_fence);
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index f42359084adc..a6ca2185f5b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -753,12 +753,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_kms_sou_surface_dirty sdirty;
+ struct vmw_validation_ctx ctx;
int ret;
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_kms_helper_resource_prepare(srf, true);
+ ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
if (ret)
return ret;
@@ -777,7 +778,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
- vmw_kms_helper_resource_finish(srf, out_fence);
+ vmw_kms_helper_resource_finish(&ctx, out_fence);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 94ad8d2acf9a..8b914504b857 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -977,12 +977,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_stdu_dirty sdirty;
+ struct vmw_validation_ctx ctx;
int ret;
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_kms_helper_resource_prepare(srf, true);
+ ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
if (ret)
return ret;
@@ -1005,7 +1006,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
out_finish:
- vmw_kms_helper_resource_finish(srf, out_fence);
+ vmw_kms_helper_resource_finish(&ctx, out_fence);
return ret;
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 03cac5731afc..7944a1f589eb 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1370,7 +1370,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
* of implement() working on 8 byte chunks
*/
- int len = hid_report_len(report) + 7;
+ u32 len = hid_report_len(report) + 7;
return kmalloc(len, flags);
}
@@ -1435,7 +1435,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
{
char *buf;
int ret;
- int len;
+ u32 len;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
@@ -1461,14 +1461,14 @@ out:
}
EXPORT_SYMBOL_GPL(__hid_request);
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt)
{
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
struct hid_driver *hdrv;
unsigned int a;
- int rsize, csize = size;
+ u32 rsize, csize = size;
u8 *cdata = data;
int ret = 0;
@@ -1526,7 +1526,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
*
* This is data entry for lower layers.
*/
-int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
+int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
{
struct hid_report_enum *report_enum;
struct hid_driver *hdrv;
@@ -2443,6 +2443,9 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 0cd4f7216239..5eea6fe0d7bd 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev,
{
struct input_dev *input = hidinput->input;
+ /*
+ * ELO devices have one Button usage in GenDesk field, which makes
+ * hid-input map it to BTN_LEFT; that confuses userspace, which then
+ * considers the device to be a mouse/touchpad instead of touchscreen.
+ */
+ clear_bit(BTN_LEFT, input->keybit);
set_bit(BTN_TOUCH, input->keybit);
set_bit(ABS_PRESSURE, input->absbit);
input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 244b97c1b74e..9347b37a1303 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -608,6 +608,9 @@
#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033
#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035
#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038
+#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040
+#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042
+#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043
#define USB_DEVICE_ID_LD_JWM 0x1080
#define USB_DEVICE_ID_LD_DMMP 0x1081
#define USB_DEVICE_ID_LD_UMIP 0x1090
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fb9ace1cef8b..5ff6dd8147b6 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1149,18 +1149,26 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
/*
* Ignore out-of-range values as per HID specification,
- * section 5.10 and 6.2.25.
+ * section 5.10 and 6.2.25, when NULL state bit is present.
+ * When it's not, clamp the value to match Microsoft's input
+ * driver as mentioned in "Required HID usages for digitizers":
+ * https://msdn.microsoft.com/en-us/library/windows/hardware/dn672278(v=vs.85).asp
*
* The logical_minimum < logical_maximum check is done so that we
* don't unintentionally discard values sent by devices which
* don't specify logical min and max.
*/
if ((field->flags & HID_MAIN_ITEM_VARIABLE) &&
- (field->logical_minimum < field->logical_maximum) &&
- (value < field->logical_minimum ||
- value > field->logical_maximum)) {
- dbg_hid("Ignoring out-of-range value %x\n", value);
- return;
+ (field->logical_minimum < field->logical_maximum)) {
+ if (field->flags & HID_MAIN_ITEM_NULL_STATE &&
+ (value < field->logical_minimum ||
+ value > field->logical_maximum)) {
+ dbg_hid("Ignoring out-of-range value %x\n", value);
+ return;
+ }
+ value = clamp(value,
+ field->logical_minimum,
+ field->logical_maximum);
}
/*
@@ -1271,7 +1279,8 @@ static void hidinput_led_worker(struct work_struct *work)
led_work);
struct hid_field *field;
struct hid_report *report;
- int len, ret;
+ int ret;
+ u32 len;
__u8 *buf;
field = hidinput_get_led_field(hid);
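
Aside on the clamping hunk above: a minimal standalone sketch of the new policy, purely illustrative. The limits, the has_null_state flag and the fixup_value() helper are made up for this sketch; the kernel itself keys off field->flags and uses its own clamp() macro.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's clamp() on ints. */
static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/*
 * Mirrors the policy in the hunk above: with a NULL-state usage an
 * out-of-range value is dropped, otherwise it is clamped into the
 * [logical_minimum, logical_maximum] interval.
 */
static bool fixup_value(int *value, int lo, int hi, bool has_null_state)
{
	if (lo >= hi)			/* no usable limits: pass through */
		return true;
	if (has_null_state && (*value < lo || *value > hi))
		return false;		/* drop, per HID spec 5.10 / 6.2.25 */
	*value = clamp_int(*value, lo, hi);
	return true;
}

int main(void)
{
	int v = 300;

	if (fixup_value(&v, 0, 255, false))
		printf("clamped to %d\n", v);	/* prints 255 */
	return 0;
}
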
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 89e9032ab1e7..fba655d639af 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -315,7 +315,8 @@ static struct attribute_group mt_attribute_group = {
static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hdev);
- int ret, size = hid_report_len(report);
+ int ret;
+ u32 size = hid_report_len(report);
u8 *buf;
/*
@@ -919,7 +920,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
struct hid_report_enum *re;
struct mt_class *cls = &td->mtclass;
char *buf;
- int report_len;
+ u32 report_len;
if (td->inputmode < 0)
return;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index be89bcbf6a71..276d12d4b576 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -110,8 +110,8 @@ struct rmi_data {
u8 *writeReport;
u8 *readReport;
- int input_report_size;
- int output_report_size;
+ u32 input_report_size;
+ u32 output_report_size;
unsigned long flags;
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b0bb99a821bd..1b1dccd37fbd 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1056,7 +1056,6 @@ struct sony_sc {
u8 battery_charging;
u8 battery_capacity;
u8 led_state[MAX_LEDS];
- u8 resume_led_state[MAX_LEDS];
u8 led_delay_on[MAX_LEDS];
u8 led_delay_off[MAX_LEDS];
u8 led_count;
@@ -1793,6 +1792,7 @@ static int sony_leds_init(struct sony_sc *sc)
led->name = name;
led->brightness = sc->led_state[n];
led->max_brightness = max_brightness[n];
+ led->flags = LED_CORE_SUSPENDRESUME;
led->brightness_get = sony_led_get_brightness;
led->brightness_set = sony_led_set_brightness;
@@ -2509,47 +2509,32 @@ static void sony_remove(struct hid_device *hdev)
static int sony_suspend(struct hid_device *hdev, pm_message_t message)
{
- /*
- * On suspend save the current LED state,
- * stop running force-feedback and blank the LEDS.
- */
- if (SONY_LED_SUPPORT || SONY_FF_SUPPORT) {
- struct sony_sc *sc = hid_get_drvdata(hdev);
-
#ifdef CONFIG_SONY_FF
- sc->left = sc->right = 0;
-#endif
- memcpy(sc->resume_led_state, sc->led_state,
- sizeof(sc->resume_led_state));
- memset(sc->led_state, 0, sizeof(sc->led_state));
+ /* On suspend stop any running force-feedback events */
+ if (SONY_FF_SUPPORT) {
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+ sc->left = sc->right = 0;
sony_send_output_report(sc);
}
+#endif
return 0;
}
static int sony_resume(struct hid_device *hdev)
{
- /* Restore the state of controller LEDs on resume */
- if (SONY_LED_SUPPORT) {
- struct sony_sc *sc = hid_get_drvdata(hdev);
-
- memcpy(sc->led_state, sc->resume_led_state,
- sizeof(sc->led_state));
-
- /*
- * The Sixaxis and navigation controllers on USB need to be
- * reinitialized on resume or they won't behave properly.
- */
- if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
- (sc->quirks & NAVIGATION_CONTROLLER_USB)) {
- sixaxis_set_operational_usb(sc->hdev);
- sc->defer_initialization = 1;
- }
+ struct sony_sc *sc = hid_get_drvdata(hdev);
- sony_set_leds(sc);
+ /*
+ * The Sixaxis and navigation controllers on USB need to be
+ * reinitialized on resume or they won't behave properly.
+ */
+ if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
+ (sc->quirks & NAVIGATION_CONTROLLER_USB)) {
+ sixaxis_set_operational_usb(sc->hdev);
+ sc->defer_initialization = 1;
}
return 0;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index f0e2757cb909..216f0338a1f7 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
int ret = 0, len;
unsigned char report_number;
+ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ ret = -ENODEV;
+ goto out;
+ }
+
dev = hidraw_table[minor]->hid;
if (!dev->ll_driver->raw_request) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 865e7c262322..2548c5dbdc75 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -142,10 +142,10 @@ struct i2c_hid {
* register of the HID
* descriptor. */
unsigned int bufsize; /* i2c buffer size */
- char *inbuf; /* Input buffer */
- char *rawbuf; /* Raw Input buffer */
- char *cmdbuf; /* Command buffer */
- char *argsbuf; /* Command arguments buffer */
+ u8 *inbuf; /* Input buffer */
+ u8 *rawbuf; /* Raw Input buffer */
+ u8 *cmdbuf; /* Command buffer */
+ u8 *argsbuf; /* Command arguments buffer */
unsigned long flags; /* device flags */
unsigned long quirks; /* Various quirks */
@@ -451,7 +451,8 @@ out_unlock:
static void i2c_hid_get_input(struct i2c_hid *ihid)
{
- int ret, ret_size;
+ int ret;
+ u32 ret_size;
int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
if (size > ihid->bufsize)
@@ -476,7 +477,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
return;
}
- if (ret_size > size) {
+ if ((ret_size > size) || (ret_size <= 2)) {
dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
__func__, size, ret_size);
return;
@@ -968,6 +969,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
return ret < 0 && ret != -ENXIO ? ret : 0;
}
+static void i2c_hid_acpi_fix_up_power(struct device *dev)
+{
+ acpi_handle handle = ACPI_HANDLE(dev);
+ struct acpi_device *adev;
+
+ if (handle && acpi_bus_get_device(handle, &adev) == 0)
+ acpi_device_fix_up_power(adev);
+}
+
static const struct acpi_device_id i2c_hid_acpi_match[] = {
{"ACPI0C50", 0 },
{"PNP0C50", 0 },
@@ -980,6 +990,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
{
return -ENODEV;
}
+
+static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
#endif
#ifdef CONFIG_OF
@@ -1082,6 +1094,8 @@ static int i2c_hid_probe(struct i2c_client *client,
if (ret < 0)
goto err;
+ i2c_hid_acpi_fix_up_power(&client->dev);
+
pm_runtime_get_noresume(&client->dev);
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 7a4d39ce51d9..e8b90b534f08 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -351,7 +351,7 @@ static int wacom_set_device_mode(struct hid_device *hdev,
u8 *rep_data;
struct hid_report *r;
struct hid_report_enum *re;
- int length;
+ u32 length;
int error = -ENOMEM, limit = 0;
if (wacom_wac->mode_report < 0)
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 6031cd146556..802afc98e8bd 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -989,7 +989,7 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
/* Pad to 32-bits - FIXME: Revisit*/
if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
- goto drop;
+ goto inc_dropped;
/*
* Modem sends Phonet messages over SSI with its own endianess...
@@ -1041,8 +1041,9 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
drop2:
hsi_free_msg(msg);
drop:
- dev->stats.tx_dropped++;
dev_kfree_skb(skb);
+inc_dropped:
+ dev->stats.tx_dropped++;
return 0;
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index d8bc4b910192..9360cdce740e 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -70,7 +70,7 @@ static const struct vmbus_device vmbus_devs[] = {
/* PCIE */
{ .dev_type = HV_PCIE,
HV_PCIE_GUID,
- .perf_device = true,
+ .perf_device = false,
},
/* Synthetic Frame Buffer */
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index b24f1d3045f0..ac63e562071f 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -94,18 +94,20 @@ enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
u16 config_default;
- int calibration_factor;
+ int calibration_value;
int registers;
int shunt_div;
int bus_voltage_shift;
int bus_voltage_lsb; /* uV */
- int power_lsb; /* uW */
+ int power_lsb_factor;
};
struct ina2xx_data {
const struct ina2xx_config *config;
long rshunt;
+ long current_lsb_uA;
+ long power_lsb_uW;
struct mutex config_lock;
struct regmap *regmap;
@@ -115,21 +117,21 @@ struct ina2xx_data {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
.config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
+ .calibration_value = 4096,
.registers = INA219_REGISTERS,
.shunt_div = 100,
.bus_voltage_shift = 3,
.bus_voltage_lsb = 4000,
- .power_lsb = 20000,
+ .power_lsb_factor = 20,
},
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
+ .calibration_value = 2048,
.registers = INA226_REGISTERS,
.shunt_div = 400,
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
- .power_lsb = 25000,
+ .power_lsb_factor = 25,
},
};
@@ -168,12 +170,16 @@ static u16 ina226_interval_to_reg(int interval)
return INA226_SHIFT_AVG(avg_bits);
}
+/*
+ * Calibration register is set to the best value, which eliminates
+ * truncation errors on calculating current register in hardware.
+ * According to datasheet (eq. 3) the best values are 2048 for
+ * ina226 and 4096 for ina219. They are hardcoded as calibration_value.
+ */
static int ina2xx_calibrate(struct ina2xx_data *data)
{
- u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- data->rshunt);
-
- return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
+ return regmap_write(data->regmap, INA2XX_CALIBRATION,
+ data->config->calibration_value);
}
/*
@@ -186,10 +192,6 @@ static int ina2xx_init(struct ina2xx_data *data)
if (ret < 0)
return ret;
- /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet).
- */
return ina2xx_calibrate(data);
}
@@ -267,15 +269,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_POWER:
- val = regval * data->config->power_lsb;
+ val = regval * data->power_lsb_uW;
break;
case INA2XX_CURRENT:
- /* signed register, LSB=1mA (selected), in mA */
- val = (s16)regval;
+ /* signed register, result in mA */
+ val = regval * data->current_lsb_uA;
+ val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_CALIBRATION:
- val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- regval);
+ val = regval;
break;
default:
/* programmer goofed */
@@ -303,9 +305,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
ina2xx_get_value(data, attr->index, regval));
}
-static ssize_t ina2xx_set_shunt(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+/*
+ * In order to keep calibration register value fixed, the product
+ * of current_lsb and shunt_resistor should also be fixed and equal
+ * to shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 in order
+ * to keep the scale.
+ */
+static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
+{
+ unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
+ data->config->shunt_div);
+ if (val <= 0 || val > dividend)
+ return -EINVAL;
+
+ mutex_lock(&data->config_lock);
+ data->rshunt = val;
+ data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
+ data->power_lsb_uW = data->config->power_lsb_factor *
+ data->current_lsb_uA;
+ mutex_unlock(&data->config_lock);
+
+ return 0;
+}
+
+static ssize_t ina2xx_store_shunt(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
unsigned long val;
int status;
@@ -315,18 +340,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
if (status < 0)
return status;
- if (val == 0 ||
- /* Values greater than the calibration factor make no sense. */
- val > data->config->calibration_factor)
- return -EINVAL;
-
- mutex_lock(&data->config_lock);
- data->rshunt = val;
- status = ina2xx_calibrate(data);
- mutex_unlock(&data->config_lock);
+ status = ina2xx_set_shunt(data, val);
if (status < 0)
return status;
-
return count;
}
@@ -386,7 +402,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_set_shunt,
+ ina2xx_show_value, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
@@ -431,6 +447,7 @@ static int ina2xx_probe(struct i2c_client *client,
/* set the device type */
data->config = &ina2xx_config[id->driver_data];
+ mutex_init(&data->config_lock);
if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@@ -441,10 +458,7 @@ static int ina2xx_probe(struct i2c_client *client,
val = INA2XX_RSHUNT_DEFAULT;
}
- if (val <= 0 || val > data->config->calibration_factor)
- return -ENODEV;
-
- data->rshunt = val;
+ ina2xx_set_shunt(data, val);
ina2xx_regmap_config.max_register = data->config->registers;
@@ -460,8 +474,6 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENODEV;
}
- mutex_init(&data->config_lock);
-
data->groups[group++] = &ina2xx_group;
if (id->driver_data == ina226)
data->groups[group++] = &ina226_group;
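
To make the fixed-calibration scheme above concrete, here is a hypothetical standalone recalculation of the per-LSB values for an ina226-style configuration (shunt_div = 400, power_lsb_factor = 25, taken from the table above) with an example 10 mOhm shunt. DIV_ROUND_CLOSEST is re-declared only so the sketch builds outside the kernel; the shunt value is an assumption for illustration.

#include <stdio.h>

/* Same rounding divide the kernel macro performs, redeclared for a standalone build. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	long shunt_div = 400;		/* ina226 row in the config table above */
	long power_lsb_factor = 25;
	long rshunt_uohm = 10000;	/* example: 10 mOhm shunt, in micro-ohms */

	long dividend = DIV_ROUND_CLOSEST(1000000000L, shunt_div);	/* 2500000 */
	long current_lsb_uA = DIV_ROUND_CLOSEST(dividend, rshunt_uohm);	/* 250 uA */
	long power_lsb_uW = power_lsb_factor * current_lsb_uA;		/* 6250 uW */

	printf("current LSB %ld uA, power LSB %ld uW\n",
	       current_lsb_uA, power_lsb_uW);
	return 0;
}

Keeping the calibration register constant and folding the shunt value into current_lsb_uA is what lets the driver accept a new shunt_resistor value without rewriting the chip's calibration, as the comment above ina2xx_set_shunt() explains.
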
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 3baa4f4a8c5e..d659a02647d4 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -101,8 +101,8 @@ static const struct coefficients adm1075_coefficients[] = {
[0] = { 27169, 0, -1 }, /* voltage */
[1] = { 806, 20475, -1 }, /* current, irange25 */
[2] = { 404, 20475, -1 }, /* current, irange50 */
- [3] = { 0, -1, 8549 }, /* power, irange25 */
- [4] = { 0, -1, 4279 }, /* power, irange50 */
+ [3] = { 8549, 0, -1 }, /* power, irange25 */
+ [4] = { 4279, 0, -1 }, /* power, irange50 */
};
static const struct coefficients adm1275_coefficients[] = {
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index d8517d2a968c..864488793f09 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -362,6 +362,13 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etr_cs_ops;
+ /*
+ * ETR configuration uses a 40-bit AXI master in place of
+ * the embedded SRAM of ETB/ETF.
+ */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret)
+ goto out;
} else {
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
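
Short aside on the 40-bit mask used in the TMC hunk above: DMA_BIT_MASK(40) is just the lowest 40 bits set, i.e. the ETR's AXI master can address 1 TiB of physical memory. A standalone illustration follows; the macro is re-declared here only so the snippet builds outside the kernel.

#include <stdio.h>
#include <inttypes.h>

/* Same expansion as the kernel macro, redeclared for a standalone build. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t mask = DMA_BIT_MASK(40);

	printf("mask 0x%" PRIx64 " (%llu GiB addressable)\n",
	       mask, (unsigned long long)((mask + 1) >> 30));	/* 0xffffffffff, 1024 GiB */
	return 0;
}
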
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 0673baf0f2f5..ff579a7c6d00 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -46,8 +46,11 @@
#define TPIU_ITATBCTR0 0xef8
/** register definition **/
+/* FFSR - 0x300 */
+#define FFSR_FT_STOPPED BIT(1)
/* FFCR - 0x304 */
#define FFCR_FON_MAN BIT(6)
+#define FFCR_STOP_FI BIT(12)
/**
* @base: memory mapped base address for this component.
@@ -85,10 +88,14 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
- /* Clear formatter controle reg. */
- writel_relaxed(0x0, drvdata->base + TPIU_FFCR);
+ /* Clear formatter and stop on flush */
+ writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR);
/* Generate manual flush */
- writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+ writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+ /* Wait for flush to complete */
+ coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
+ /* Wait for formatter to stop */
+ coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
CS_LOCK(drvdata->base);
}
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 7bf00a0beb6f..4383324ec01c 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -498,6 +498,9 @@ int coresight_enable(struct coresight_device *csdev)
{
int cpu, ret = 0;
struct list_head *path;
+ enum coresight_dev_subtype_source subtype;
+
+ subtype = csdev->subtype.source_subtype;
mutex_lock(&coresight_mutex);
@@ -505,8 +508,16 @@ int coresight_enable(struct coresight_device *csdev)
if (ret)
goto out;
- if (csdev->enable)
+ if (csdev->enable) {
+ /*
+ * There could be multiple applications driving the software
+ * source. So keep the refcount for each such user when the
+ * source is already enabled.
+ */
+ if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
+ atomic_inc(csdev->refcnt);
goto out;
+ }
path = coresight_build_path(csdev);
if (IS_ERR(path)) {
@@ -523,7 +534,7 @@ int coresight_enable(struct coresight_device *csdev)
if (ret)
goto err_source;
- switch (csdev->subtype.source_subtype) {
+ switch (subtype) {
case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
/*
* When working from sysFS it is important to keep track
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 629e031b7456..09142e99e915 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -149,7 +149,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
continue;
/* The local out port number */
- pdata->outports[i] = endpoint.id;
+ pdata->outports[i] = endpoint.port;
/*
* Get a handle on the remote port and parent
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 809f4d4e93a0..340e037b3224 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -507,7 +507,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
i2c_dw_disable_int(dev);
/* Enable the adapter */
- __i2c_dw_enable(dev, true);
+ __i2c_dw_enable_and_wait(dev, true);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e6fe21a6135b..b32bf7eac3c8 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -243,6 +243,7 @@ struct i801_priv {
struct i2c_adapter adapter;
unsigned long smba;
unsigned char original_hstcfg;
+ unsigned char original_slvcmd;
struct pci_dev *pci_dev;
unsigned int features;
@@ -962,13 +963,24 @@ static int i801_enable_host_notify(struct i2c_adapter *adapter)
if (!priv->host_notify)
return -ENOMEM;
- outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
+ if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
+ outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
+ SMBSLVCMD(priv));
+
/* clear Host Notify bit to allow a new notification */
outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
return 0;
}
+static void i801_disable_host_notify(struct i801_priv *priv)
+{
+ if (!(priv->features & FEATURE_HOST_NOTIFY))
+ return;
+
+ outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
+}
+
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = i801_access,
.functionality = i801_func,
@@ -1589,6 +1601,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
outb_p(inb_p(SMBAUXCTL(priv)) &
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ /* Remember original Host Notify setting */
+ if (priv->features & FEATURE_HOST_NOTIFY)
+ priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+
/* Default timeout in interrupt mode: 200 ms */
priv->adapter.timeout = HZ / 5;
@@ -1666,6 +1682,7 @@ static void i801_remove(struct pci_dev *dev)
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
+ i801_disable_host_notify(priv);
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
i801_acpi_remove(priv);
@@ -1679,6 +1696,15 @@ static void i801_remove(struct pci_dev *dev)
*/
}
+static void i801_shutdown(struct pci_dev *dev)
+{
+ struct i801_priv *priv = pci_get_drvdata(dev);
+
+ /* Restore config registers to avoid hard hang on some systems */
+ i801_disable_host_notify(priv);
+ pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+}
+
#ifdef CONFIG_PM
static int i801_suspend(struct device *dev)
{
@@ -1711,6 +1737,7 @@ static struct pci_driver i801_driver = {
.id_table = i801_ids,
.probe = i801_probe,
.remove = i801_remove,
+ .shutdown = i801_shutdown,
.driver = {
.pm = &i801_pm_ops,
},
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index dfc98df7b1b6..7aa7b9cb6203 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -18,6 +18,9 @@
#define ACPI_SMBUS_HC_CLASS "smbus"
#define ACPI_SMBUS_HC_DEVICE_NAME "cmi"
+/* SMBUS HID definition as supported by Microsoft Windows */
+#define ACPI_SMBUS_MS_HID "SMB0001"
+
ACPI_MODULE_NAME("smbus_cmi");
struct smbus_methods_t {
@@ -51,6 +54,7 @@ static const struct smbus_methods_t ibm_smbus_methods = {
static const struct acpi_device_id acpi_smbus_cmi_ids[] = {
{"SMBUS01", (kernel_ulong_t)&smbus_methods},
{ACPI_SMBUS_IBM_HID, (kernel_ulong_t)&ibm_smbus_methods},
+ {ACPI_SMBUS_MS_HID, (kernel_ulong_t)&smbus_methods},
{"", 0}
};
MODULE_DEVICE_TABLE(acpi, acpi_smbus_cmi_ids);
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index c6a90b4a9c62..91b10afc8c34 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -196,20 +196,25 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mux->data.reg_size = resource_size(res);
mux->data.reg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mux->data.reg))
- return PTR_ERR(mux->data.reg);
+ if (IS_ERR(mux->data.reg)) {
+ ret = PTR_ERR(mux->data.reg);
+ goto err_put_parent;
+ }
}
if (mux->data.reg_size != 4 && mux->data.reg_size != 2 &&
mux->data.reg_size != 1) {
dev_err(&pdev->dev, "Invalid register size\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_parent;
}
muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
i2c_mux_reg_select, NULL);
- if (!muxc)
- return -ENOMEM;
+ if (!muxc) {
+ ret = -ENOMEM;
+ goto err_put_parent;
+ }
muxc->priv = mux;
platform_set_drvdata(pdev, muxc);
@@ -235,6 +240,8 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
add_adapter_failed:
i2c_mux_del_adapters(muxc);
+err_put_parent:
+ i2c_put_adapter(parent);
return ret;
}
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 3a557e3181ea..32cd64c2ee06 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -827,6 +827,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
+ struct st_sensors_platform_data *pdata =
+ (struct st_sensors_platform_data *)adata->dev->platform_data;
int irq = adata->get_irq_data_ready(indio_dev);
int err;
@@ -853,11 +855,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
&adata->sensor_settings->fs.fs_avl[0];
adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
- if (!adata->dev->platform_data)
- adata->dev->platform_data =
- (struct st_sensors_platform_data *)&default_accel_pdata;
+ if (!pdata)
+ pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
- err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
+ err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
goto st_accel_power_off;
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 678e8c7ea763..fec696ec3fd1 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -121,10 +121,21 @@ static int hi8435_write_event_config(struct iio_dev *idev,
enum iio_event_direction dir, int state)
{
struct hi8435_priv *priv = iio_priv(idev);
+ int ret;
+ u32 tmp;
+
+ if (state) {
+ ret = hi8435_readl(priv, HI8435_SO31_0_REG, &tmp);
+ if (ret < 0)
+ return ret;
+ if (tmp & BIT(chan->channel))
+ priv->event_prev_val |= BIT(chan->channel);
+ else
+ priv->event_prev_val &= ~BIT(chan->channel);
- priv->event_scan_mask &= ~BIT(chan->channel);
- if (state)
priv->event_scan_mask |= BIT(chan->channel);
+ } else
+ priv->event_scan_mask &= ~BIT(chan->channel);
return 0;
}
@@ -442,13 +453,15 @@ static int hi8435_probe(struct spi_device *spi)
priv->spi = spi;
reset_gpio = devm_gpiod_get(&spi->dev, NULL, GPIOD_OUT_LOW);
- if (IS_ERR(reset_gpio)) {
- /* chip s/w reset if h/w reset failed */
+ if (!IS_ERR(reset_gpio)) {
+ /* need >=100ns low pulse to reset chip */
+ gpiod_set_raw_value_cansleep(reset_gpio, 0);
+ udelay(1);
+ gpiod_set_raw_value_cansleep(reset_gpio, 1);
+ } else {
+ /* s/w reset chip if h/w reset is not available */
hi8435_writeb(priv, HI8435_CTRL_REG, HI8435_CTRL_SRST);
hi8435_writeb(priv, HI8435_CTRL_REG, 0);
- } else {
- udelay(5);
- gpiod_set_value(reset_gpio, 1);
}
spi_set_drvdata(spi, idev);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index ab646a90e3da..af85db50412e 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -215,7 +215,7 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
ret = sensor_hub_set_feature(st->hsdev, st->poll.report_id,
st->poll.index, sizeof(value), &value);
if (ret < 0 || value < 0)
- ret = -EINVAL;
+ return -EINVAL;
ret = sensor_hub_get_feature(st->hsdev,
st->poll.report_id,
@@ -265,7 +265,7 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
st->sensitivity.index, sizeof(value),
&value);
if (ret < 0 || value < 0)
- ret = -EINVAL;
+ return -EINVAL;
ret = sensor_hub_get_feature(st->hsdev,
st->sensitivity.report_id,
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index f53e9a803a0e..93b99bd93738 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -47,6 +47,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (adis->trig == NULL)
return -ENOMEM;
+ adis->trig->dev.parent = &adis->spi->dev;
+ adis->trig->ops = &adis_trigger_ops;
+ iio_trigger_set_drvdata(adis->trig, adis);
+
ret = request_irq(adis->spi->irq,
&iio_trigger_generic_data_rdy_poll,
IRQF_TRIGGER_RISING,
@@ -55,9 +59,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (ret)
goto error_free_trig;
- adis->trig->dev.parent = &adis->spi->dev;
- adis->trig->ops = &adis_trigger_ops;
- iio_trigger_set_drvdata(adis->trig, adis);
ret = iio_trigger_register(adis->trig);
indio_dev->trig = iio_trigger_get(adis->trig);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 158aaf44dd95..5d05c38c4ba9 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -174,7 +174,7 @@ unsigned int iio_buffer_poll(struct file *filp,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
- if (!indio_dev->info)
+ if (!indio_dev->info || rb == NULL)
return 0;
poll_wait(filp, &rb->pollq, wait);
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index 7de0f397194b..d23cf7759ee7 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -510,13 +510,26 @@ static int rpr0521_probe(struct i2c_client *client,
ret = pm_runtime_set_active(&client->dev);
if (ret < 0)
- return ret;
+ goto err_poweroff;
pm_runtime_enable(&client->dev);
pm_runtime_set_autosuspend_delay(&client->dev, RPR0521_SLEEP_DELAY_MS);
pm_runtime_use_autosuspend(&client->dev);
- return iio_device_register(indio_dev);
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_pm_disable;
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+err_poweroff:
+ rpr0521_poweroff(data);
+
+ return ret;
}
static int rpr0521_remove(struct i2c_client *client)
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 6325e7dc8e03..f3cb4dc05391 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -48,8 +48,6 @@ static int st_magn_spi_remove(struct spi_device *spi)
}
static const struct spi_device_id st_magn_id_table[] = {
- { LSM303DLHC_MAGN_DEV_NAME },
- { LSM303DLM_MAGN_DEV_NAME },
{ LIS3MDL_MAGN_DEV_NAME },
{ LSM303AGR_MAGN_DEV_NAME },
{},
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 44e46c159a7e..3458418d88bc 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -638,6 +638,8 @@ static const struct iio_trigger_ops st_press_trigger_ops = {
int st_press_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *press_data = iio_priv(indio_dev);
+ struct st_sensors_platform_data *pdata =
+ (struct st_sensors_platform_data *)press_data->dev->platform_data;
int irq = press_data->get_irq_data_ready(indio_dev);
int err;
@@ -673,12 +675,10 @@ int st_press_common_probe(struct iio_dev *indio_dev)
press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
/* Some devices don't support a data ready pin. */
- if (!press_data->dev->platform_data &&
- press_data->sensor_settings->drdy_irq.addr)
- press_data->dev->platform_data =
- (struct st_sensors_platform_data *)&default_press_pdata;
+ if (!pdata && press_data->sensor_settings->drdy_irq.addr)
+ pdata = (struct st_sensors_platform_data *)&default_press_pdata;
- err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
+ err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
goto st_press_power_off;
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 19d2eb46fda6..2a4a62ebfd8d 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -871,12 +871,13 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
{
int ret;
unsigned int val;
+ long timeout;
zpa2326_dbg(indio_dev, "waiting for one shot completion interrupt");
- ret = wait_for_completion_interruptible_timeout(
+ timeout = wait_for_completion_interruptible_timeout(
&private->data_ready, ZPA2326_CONVERSION_JIFFIES);
- if (ret > 0)
+ if (timeout > 0)
/*
* Interrupt handler completed before timeout: return operation
* status.
@@ -886,13 +887,16 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
/* Clear all interrupts just to be sure. */
regmap_read(private->regmap, ZPA2326_INT_SOURCE_REG, &val);
- if (!ret)
+ if (!timeout) {
/* Timed out. */
+ zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)",
+ timeout);
ret = -ETIME;
-
- if (ret != -ERESTARTSYS)
- zpa2326_warn(indio_dev, "no one shot interrupt occurred (%d)",
- ret);
+ } else if (timeout < 0) {
+ zpa2326_warn(indio_dev,
+ "wait for one shot interrupt cancelled");
+ ret = -ERESTARTSYS;
+ }
return ret;
}
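
The zpa2326 rework above is careful to keep all three outcomes of wait_for_completion_interruptible_timeout() apart: a positive return means the interrupt handler completed in time, zero means timeout, negative means the wait was interrupted by a signal. A hypothetical standalone helper showing that mapping; ERESTARTSYS is kernel-internal, so it is defined here only for the sketch.

#include <stdio.h>
#include <errno.h>

#define ERESTARTSYS 512	/* kernel-internal value, not exported to userspace */

/*
 * Maps the return value of wait_for_completion_interruptible_timeout()
 * onto an error code, following the same three-way split as the patch.
 */
static int classify_wait(long timeout)
{
	if (timeout > 0)
		return 0;		/* completion arrived in time */
	if (timeout == 0)
		return -ETIME;		/* timed out */
	return -ERESTARTSYS;		/* interrupted by a signal */
}

int main(void)
{
	printf("%d %d %d\n", classify_wait(5), classify_wait(0),
	       classify_wait(-ERESTARTSYS));
	return 0;
}
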
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index fb4ce0394ac7..978b8d94f9a4 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -209,6 +209,22 @@ int rdma_addr_size(struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_addr_size);
+int rdma_addr_size_in6(struct sockaddr_in6 *addr)
+{
+ int ret = rdma_addr_size((struct sockaddr *) addr);
+
+ return ret <= sizeof(*addr) ? ret : 0;
+}
+EXPORT_SYMBOL(rdma_addr_size_in6);
+
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
+{
+ int ret = rdma_addr_size((struct sockaddr *) addr);
+
+ return ret <= sizeof(*addr) ? ret : 0;
+}
+EXPORT_SYMBOL(rdma_addr_size_kss);
+
static struct rdma_addr_client self;
void rdma_addr_register_client(struct rdma_addr_client *client)
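
The two helpers added above cap whatever rdma_addr_size() reports at the size of the caller-visible field, which helps ensure that later copies driven by that size stay within a sockaddr_in6- or sockaddr_storage-sized buffer even for address families with larger native sockaddrs. A simplified userspace sketch of the same idea; addr_size() here is a stand-in, not the kernel function, and the families covered are only an example.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Stand-in for rdma_addr_size(): expected length for the claimed family. */
static size_t addr_size(const struct sockaddr *sa)
{
	switch (sa->sa_family) {
	case AF_INET:	return sizeof(struct sockaddr_in);
	case AF_INET6:	return sizeof(struct sockaddr_in6);
	default:	return 0;	/* unknown/unsupported family */
	}
}

/* Same shape as rdma_addr_size_in6(): reject sizes larger than the field. */
static size_t addr_size_in6(const struct sockaddr_in6 *addr)
{
	size_t ret = addr_size((const struct sockaddr *)addr);

	return ret <= sizeof(*addr) ? ret : 0;
}

int main(void)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6 };

	printf("accepted size: %zu\n", addr_size_in6(&a));	/* 28 on Linux */
	return 0;
}
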
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 30f01613b518..cbe5324c4331 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4039,6 +4039,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
struct cma_multicast *mc;
int ret;
+ if (!id->device)
+ return -EINVAL;
+
id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
!cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
@@ -4336,7 +4339,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
goto out;
if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_src_addr(id_priv)),
+ rdma_addr_size(cma_dst_addr(id_priv)),
cma_dst_addr(id_priv),
RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
goto out;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 760ef603a468..15f4bdf89fe1 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -999,8 +999,7 @@ static int __init ib_core_init(void)
return -ENOMEM;
ib_comp_wq = alloc_workqueue("ib-comp-wq",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- WQ_UNBOUND_MAX_ACTIVE);
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!ib_comp_wq) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index ade71e7f0131..2fe4c2c921de 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -664,6 +664,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
}
skb_num++;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ ret = -EINVAL;
for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
hlist_node) {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 9520154f1d7c..f2f1c9fec0b1 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
ctx = idr_find(&ctx_idr, id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
- else if (ctx->file != file)
+ else if (ctx->file != file || !ctx->cm_id)
ctx = ERR_PTR(-EINVAL);
return ctx;
}
@@ -454,6 +454,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
struct rdma_ucm_create_id cmd;
struct rdma_ucm_create_id_resp resp;
struct ucma_context *ctx;
+ struct rdma_cm_id *cm_id;
enum ib_qp_type qp_type;
int ret;
@@ -474,10 +475,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
return -ENOMEM;
ctx->uid = cmd.uid;
- ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
- ucma_event_handler, ctx, cmd.ps, qp_type);
- if (IS_ERR(ctx->cm_id)) {
- ret = PTR_ERR(ctx->cm_id);
+ cm_id = rdma_create_id(current->nsproxy->net_ns,
+ ucma_event_handler, ctx, cmd.ps, qp_type);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
goto err1;
}
@@ -487,14 +488,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
ret = -EFAULT;
goto err2;
}
+
+ ctx->cm_id = cm_id;
return 0;
err2:
- rdma_destroy_id(ctx->cm_id);
+ rdma_destroy_id(cm_id);
err1:
mutex_lock(&mut);
idr_remove(&ctx_idr, ctx->id);
mutex_unlock(&mut);
+ mutex_lock(&file->mut);
+ list_del(&ctx->list);
+ mutex_unlock(&file->mut);
kfree(ctx);
return ret;
}
@@ -624,6 +630,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (!rdma_addr_size_in6(&cmd.addr))
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -637,22 +646,21 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_bind cmd;
- struct sockaddr *addr;
struct ucma_context *ctx;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- addr = (struct sockaddr *) &cmd.addr;
- if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
+ if (cmd.reserved || !cmd.addr_size ||
+ cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_bind_addr(ctx->cm_id, addr);
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
ucma_put_ctx(ctx);
return ret;
}
@@ -668,13 +676,16 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
+ !rdma_addr_size_in6(&cmd.dst_addr))
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
- (struct sockaddr *) &cmd.dst_addr,
- cmd.timeout_ms);
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
ucma_put_ctx(ctx);
return ret;
}
@@ -684,24 +695,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
int in_len, int out_len)
{
struct rdma_ucm_resolve_addr cmd;
- struct sockaddr *src, *dst;
struct ucma_context *ctx;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- src = (struct sockaddr *) &cmd.src_addr;
- dst = (struct sockaddr *) &cmd.dst_addr;
- if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
- !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
+ if (cmd.reserved ||
+ (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
+ !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
ucma_put_ctx(ctx);
return ret;
}
@@ -1139,10 +1149,18 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (cmd.qp_state > IB_QPS_ERR)
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (!ctx->cm_id->device) {
+ ret = -EINVAL;
+ goto out;
+ }
+
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
@@ -1213,6 +1231,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
if (!optlen)
return -EINVAL;
+ if (!ctx->cm_id->device)
+ return -EINVAL;
+
memset(&sa_path, 0, sizeof(sa_path));
ib_sa_unpack_path(path_data->path_rec, &sa_path);
@@ -1275,6 +1296,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
+ return -EINVAL;
+
optval = memdup_user((void __user *) (unsigned long) cmd.optval,
cmd.optlen);
if (IS_ERR(optval)) {
@@ -1296,7 +1320,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
{
struct rdma_ucm_notify cmd;
struct ucma_context *ctx;
- int ret;
+ int ret = -EINVAL;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
@@ -1305,7 +1329,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
+ if (ctx->cm_id->device)
+ ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+
ucma_put_ctx(ctx);
return ret;
}
@@ -1324,7 +1350,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
return -ENOSPC;
addr = (struct sockaddr *) &cmd->addr;
- if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+ if (cmd->addr_size != rdma_addr_size(addr))
return -EINVAL;
if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
@@ -1391,7 +1417,10 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
join_cmd.response = cmd.response;
join_cmd.uid = cmd.uid;
join_cmd.id = cmd.id;
- join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
+ join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
+ if (!join_cmd.addr_size)
+ return -EINVAL;
+
join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
@@ -1407,6 +1436,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (!rdma_addr_size_kss(&cmd.addr))
+ return -EINVAL;
+
return ucma_process_join(file, &cmd, out_len);
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c22fde6207d1..e74aa1d60fdb 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -193,7 +193,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
sg_list_start = umem->sg_head.sgl;
while (npages) {
- ret = get_user_pages(cur_base,
+ ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags, page_list, vma_list);
@@ -357,7 +357,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
return -EINVAL;
}
- ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
+ ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
offset + ib_umem_offset(umem));
if (ret < 0)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index d118ffe0bfb6..4b717cf50d27 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2491,9 +2491,13 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
+ if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
+ sizeof (struct ib_sge))
+ return NULL;
+
return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
num_sge * sizeof (struct ib_sge), GFP_KERNEL);
-};
+}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
struct ib_device *ib_dev,
@@ -2720,6 +2724,13 @@ static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
goto err;
}
+ if (user_wr->num_sge >=
+ (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
+ sizeof (struct ib_sge)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
user_wr->num_sge * sizeof (struct ib_sge),
GFP_KERNEL);
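
The guard added to alloc_wr() above rejects num_sge values for which ALIGN(wr_size, sizeof(struct ib_sge)) + num_sge * sizeof(struct ib_sge) would wrap 32-bit arithmetic, so the allocation can never be asked for a smaller-than-intended buffer. A hypothetical standalone sketch of the same bound check; the header size and element size are made up (16 bytes matches the u64 + u32 + u32 layout of struct ib_sge), and ALIGN_UP is a local stand-in for the kernel's ALIGN().

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

/*
 * Mirrors the bound added to alloc_wr(): with elem_size-byte scatter/gather
 * entries appended after an aligned header of hdr_size bytes, refuse counts
 * for which the total would not fit in a u32.
 */
static bool wr_size_ok(uint32_t hdr_size, uint32_t num_sge, uint32_t elem_size)
{
	uint32_t aligned_hdr = ALIGN_UP(hdr_size, elem_size);

	return num_sge < (UINT32_MAX - aligned_hdr) / elem_size;
}

int main(void)
{
	printf("%d %d\n", wr_size_ok(96, 32, 16),
	       wr_size_ok(96, 0x10000000, 16));	/* prints 1 then 0 */
	return 0;
}
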
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 44b1104eb168..c5e921bf9130 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -735,12 +735,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
return -1;
}
+static bool verify_command_idx(u32 command, bool extended)
+{
+ if (extended)
+ return command < ARRAY_SIZE(uverbs_ex_cmd_table);
+
+ return command < ARRAY_SIZE(uverbs_cmd_table);
+}
+
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct ib_uverbs_file *file = filp->private_data;
struct ib_device *ib_dev;
struct ib_uverbs_cmd_hdr hdr;
+ bool extended_command;
__u32 command;
__u32 flags;
int srcu_key;
@@ -770,6 +779,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
}
command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+ flags = (hdr.command &
+ IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+
+ extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED;
+ if (!verify_command_idx(command, extended_command)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (verify_command_mask(ib_dev, command)) {
ret = -EOPNOTSUPP;
goto out;
@@ -781,12 +799,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out;
}
- flags = (hdr.command &
- IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
-
if (!flags) {
- if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
- !uverbs_cmd_table[command]) {
+ if (!uverbs_cmd_table[command]) {
ret = -EINVAL;
goto out;
}
@@ -807,8 +821,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
struct ib_udata uhw;
size_t written_count = count;
- if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
- !uverbs_ex_cmd_table[command]) {
+ if (!uverbs_ex_cmd_table[command]) {
ret = -ENOSYS;
goto out;
}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 6512a555f7f8..dd18b74cd01d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
release_ep_resources(ep);
+ kfree_skb(skb);
return 0;
}
@@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
c4iw_put_ep(&ep->parent_ep->com);
release_ep_resources(ep);
+ kfree_skb(skb);
return 0;
}
@@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
PDBG("%s rdev %p\n", __func__, rdev);
req->cmd = CPL_ABORT_NO_RST;
+ skb_get(skb);
ret = c4iw_ofld_send(rdev, skb);
if (ret) {
__state_set(&ep->com, DEAD);
queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
- }
+ } else
+ kfree_skb(skb);
}
static int send_flowc(struct c4iw_ep *ep)
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index b85a1a983e07..9e0f2ccf1d10 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -856,6 +856,11 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->status_page->db_off = 0;
+ init_completion(&rdev->rqt_compl);
+ init_completion(&rdev->pbl_compl);
+ kref_init(&rdev->rqt_kref);
+ kref_init(&rdev->pbl_kref);
+
return 0;
err_free_status_page:
free_page((unsigned long)rdev->status_page);
@@ -872,12 +877,14 @@ destroy_resource:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
- destroy_workqueue(rdev->free_workq);
kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
+ wait_for_completion(&rdev->pbl_compl);
+ wait_for_completion(&rdev->rqt_compl);
c4iw_destroy_resource(&rdev->resource);
+ destroy_workqueue(rdev->free_workq);
}
static void c4iw_dealloc(struct uld_ctx *ctx)
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7d540667dad2..896dff7a6cee 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -186,6 +186,10 @@ struct c4iw_rdev {
struct wr_log_entry *wr_log;
int wr_log_size;
struct workqueue_struct *free_workq;
+ struct completion rqt_compl;
+ struct completion pbl_compl;
+ struct kref rqt_kref;
+ struct kref pbl_kref;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 67df71a7012e..803c677e21cd 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
rdev->stats.pbl.max = rdev->stats.pbl.cur;
+ kref_get(&rdev->pbl_kref);
} else
rdev->stats.pbl.fail++;
mutex_unlock(&rdev->stats.lock);
return (u32)addr;
}
+static void destroy_pblpool(struct kref *kref)
+{
+ struct c4iw_rdev *rdev;
+
+ rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+ gen_pool_destroy(rdev->pbl_pool);
+ complete(&rdev->pbl_compl);
+}
+
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
@@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+ kref_put(&rdev->pbl_kref, destroy_pblpool);
}
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -312,7 +323,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
- gen_pool_destroy(rdev->pbl_pool);
+ kref_put(&rdev->pbl_kref, destroy_pblpool);
}
/*
@@ -333,12 +344,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
rdev->stats.rqt.max = rdev->stats.rqt.cur;
+ kref_get(&rdev->rqt_kref);
} else
rdev->stats.rqt.fail++;
mutex_unlock(&rdev->stats.lock);
return (u32)addr;
}
+static void destroy_rqtpool(struct kref *kref)
+{
+ struct c4iw_rdev *rdev;
+
+ rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+ gen_pool_destroy(rdev->rqt_pool);
+ complete(&rdev->rqt_compl);
+}
+
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
@@ -346,6 +367,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+ kref_put(&rdev->rqt_kref, destroy_rqtpool);
}
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
@@ -383,7 +405,7 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
- gen_pool_destroy(rdev->rqt_pool);
+ kref_put(&rdev->rqt_kref, destroy_rqtpool);
}
/*
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 4682909b021b..7853b0caad32 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6379,18 +6379,17 @@ static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
*
* The expectation is that the caller of this routine would have taken
* care of properly transitioning the link into the correct state.
+ * NOTE: the caller needs to acquire the dd->dc8051_lock lock
+ * before calling this function.
*/
-static void dc_shutdown(struct hfi1_devdata *dd)
+static void _dc_shutdown(struct hfi1_devdata *dd)
{
- unsigned long flags;
+ lockdep_assert_held(&dd->dc8051_lock);
- spin_lock_irqsave(&dd->dc8051_lock, flags);
- if (dd->dc_shutdown) {
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
+ if (dd->dc_shutdown)
return;
- }
+
dd->dc_shutdown = 1;
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
/* Shutdown the LCB */
lcb_shutdown(dd, 1);
/*
@@ -6401,35 +6400,45 @@ static void dc_shutdown(struct hfi1_devdata *dd)
write_csr(dd, DC_DC8051_CFG_RST, 0x1);
}
+static void dc_shutdown(struct hfi1_devdata *dd)
+{
+ mutex_lock(&dd->dc8051_lock);
+ _dc_shutdown(dd);
+ mutex_unlock(&dd->dc8051_lock);
+}
+
/*
* Calling this after the DC has been brought out of reset should not
* do any damage.
+ * NOTE: the caller needs to acquire the dd->dc8051_lock lock
+ * before calling this function.
*/
-static void dc_start(struct hfi1_devdata *dd)
+static void _dc_start(struct hfi1_devdata *dd)
{
- unsigned long flags;
- int ret;
+ lockdep_assert_held(&dd->dc8051_lock);
- spin_lock_irqsave(&dd->dc8051_lock, flags);
if (!dd->dc_shutdown)
- goto done;
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
+ return;
+
/* Take the 8051 out of reset */
write_csr(dd, DC_DC8051_CFG_RST, 0ull);
/* Wait until 8051 is ready */
- ret = wait_fm_ready(dd, TIMEOUT_8051_START);
- if (ret) {
+ if (wait_fm_ready(dd, TIMEOUT_8051_START))
dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
__func__);
- }
+
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
write_csr(dd, DCC_CFG_RESET, 0x10);
/* lcb_shutdown() with abort=1 does not restore these */
write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
- spin_lock_irqsave(&dd->dc8051_lock, flags);
dd->dc_shutdown = 0;
-done:
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
+}
+
+static void dc_start(struct hfi1_devdata *dd)
+{
+ mutex_lock(&dd->dc8051_lock);
+ _dc_start(dd);
+ mutex_unlock(&dd->dc8051_lock);
}
/*
@@ -8418,16 +8427,11 @@ static int do_8051_command(
{
u64 reg, completed;
int return_code;
- unsigned long flags;
unsigned long timeout;
hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
- /*
- * Alternative to holding the lock for a long time:
- * - keep busy wait - have other users bounce off
- */
- spin_lock_irqsave(&dd->dc8051_lock, flags);
+ mutex_lock(&dd->dc8051_lock);
/* We can't send any commands to the 8051 if it's in reset */
if (dd->dc_shutdown) {
@@ -8453,10 +8457,8 @@ static int do_8051_command(
return_code = -ENXIO;
goto fail;
}
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
- dc_shutdown(dd);
- dc_start(dd);
- spin_lock_irqsave(&dd->dc8051_lock, flags);
+ _dc_shutdown(dd);
+ _dc_start(dd);
}
/*
@@ -8534,8 +8536,7 @@ static int do_8051_command(
write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
fail:
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
-
+ mutex_unlock(&dd->dc8051_lock);
return return_code;
}
@@ -9489,8 +9490,11 @@ static int test_qsfp_read(struct hfi1_pportdata *ppd)
int ret;
u8 status;
- /* report success if not a QSFP */
- if (ppd->port_type != PORT_TYPE_QSFP)
+ /*
+ * Report success if not a QSFP or, if it is a QSFP, but the cable is
+ * not present
+ */
+ if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
return 0;
/* read byte 2, the status byte */
@@ -11846,6 +11850,10 @@ static void free_cntrs(struct hfi1_devdata *dd)
dd->scntrs = NULL;
kfree(dd->cntrnames);
dd->cntrnames = NULL;
+ if (dd->update_cntr_wq) {
+ destroy_workqueue(dd->update_cntr_wq);
+ dd->update_cntr_wq = NULL;
+ }
}
static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
@@ -12001,7 +12009,7 @@ u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
}
-static void update_synth_timer(unsigned long opaque)
+static void do_update_synth_timer(struct work_struct *work)
{
u64 cur_tx;
u64 cur_rx;
@@ -12010,8 +12018,8 @@ static void update_synth_timer(unsigned long opaque)
int i, j, vl;
struct hfi1_pportdata *ppd;
struct cntr_entry *entry;
-
- struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
+ struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
+ update_cntr_work);
/*
* Rather than keep beating on the CSRs pick a minimal set that we can
@@ -12094,7 +12102,13 @@ static void update_synth_timer(unsigned long opaque)
} else {
hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
}
+}
+
+static void update_synth_timer(unsigned long opaque)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
+ queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
@@ -12330,6 +12344,13 @@ static int init_cntrs(struct hfi1_devdata *dd)
if (init_cpu_counters(dd))
goto bail;
+ dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
+ WQ_MEM_RECLAIM, dd->unit);
+ if (!dd->update_cntr_wq)
+ goto bail;
+
+ INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
+
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
return 0;
bail:
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index cc87fd4e534b..a3279f3d2578 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -475,7 +475,7 @@ struct rvt_sge_state;
#define HFI1_PART_ENFORCE_OUT 0x2
/* how often we check for synthetic counter wrap around */
-#define SYNTH_CNT_TIME 2
+#define SYNTH_CNT_TIME 3
/* Counter flags */
#define CNTR_NORMAL 0x0 /* Normal counters, just read register */
@@ -929,8 +929,9 @@ struct hfi1_devdata {
spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
/* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
spinlock_t uctxt_lock; /* rcd and user context changes */
- /* exclusive access to 8051 */
- spinlock_t dc8051_lock;
+ struct mutex dc8051_lock; /* exclusive access to 8051 */
+ struct workqueue_struct *update_cntr_wq;
+ struct work_struct update_cntr_work;
/* exclusive access to 8051 memory */
spinlock_t dc8051_memlock;
int dc8051_timed_out; /* remember if the 8051 timed out */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index a3dd27b1305d..ae1f90ddd4e8 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1049,6 +1049,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
return ERR_PTR(-ENOMEM);
dd->num_pports = nports;
dd->pport = (struct hfi1_pportdata *)(dd + 1);
+ dd->pcidev = pdev;
+ pci_set_drvdata(pdev, dd);
INIT_LIST_HEAD(&dd->list);
idr_preload(GFP_KERNEL);
@@ -1078,11 +1080,11 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
spin_lock_init(&dd->uctxt_lock);
spin_lock_init(&dd->hfi1_diag_trans_lock);
spin_lock_init(&dd->sc_init_lock);
- spin_lock_init(&dd->dc8051_lock);
spin_lock_init(&dd->dc8051_memlock);
seqlock_init(&dd->sc2vl_lock);
spin_lock_init(&dd->sde_map_lock);
spin_lock_init(&dd->pio_map_lock);
+ mutex_init(&dd->dc8051_lock);
init_waitqueue_head(&dd->event_queue);
dd->int_counter = alloc_percpu(u64);
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 335613a1a46a..717626046ee5 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -162,9 +162,6 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
unsigned long len;
resource_size_t addr;
- dd->pcidev = pdev;
- pci_set_drvdata(pdev, dd);
-
addr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 919a5474e651..621b60ab74ee 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = {
};
static struct attribute *port_cc_default_attributes[] = {
- &cc_prescan_attr.attr
+ &cc_prescan_attr.attr,
+ NULL
};
static struct kobj_type port_cc_ktype = {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 2c4b4d072d6a..3b973cba8975 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -3644,8 +3644,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
+ roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
+ roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index d1328a697750..8ce599e1639c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -86,6 +86,7 @@
#define RDMA_OPCODE_MASK 0x0f
#define RDMA_READ_REQ_OPCODE 1
#define Q2_BAD_FRAME_OFFSET 72
+#define Q2_FPSN_OFFSET 64
#define CQE_MAJOR_DRV 0x8000
#define I40IW_TERM_SENT 0x01
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c62d354f7810..3ed49146d73c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -1320,7 +1320,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
u32 rcv_wnd = hw_host_ctx[23];
/* first partial seq # in q2 */
- u32 fps = qp->q2_buf[16];
+ u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
struct list_head *rxlist = &pfpdu->rxlist;
struct list_head *plist;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c41c8d0a4ac0..19bc1c2186ff 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1168,7 +1168,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
/* need to protect from a race on closing the vma as part of
* mlx4_ib_vma_close().
*/
- down_read(&owning_mm->mmap_sem);
+ down_write(&owning_mm->mmap_sem);
for (i = 0; i < HW_BAR_COUNT; i++) {
vma = context->hw_bar_info[i].vma;
if (!vma)
@@ -1182,11 +1182,13 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
BUG_ON(1);
}
+ context->hw_bar_info[i].vma->vm_flags &=
+ ~(VM_SHARED | VM_MAYSHARE);
/* context going to be destroyed, should not access ops any more */
context->hw_bar_info[i].vma->vm_ops = NULL;
}
- up_read(&owning_mm->mmap_sem);
+ up_write(&owning_mm->mmap_sem);
mmput(owning_mm);
put_task_struct(owning_process);
}
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 5d73989d9771..ae41623e0f13 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -406,7 +406,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
goto err_free_mr;
mr->max_pages = max_num_sg;
-
err = mlx4_mr_enable(dev->dev, &mr->mmr);
if (err)
goto err_free_pl;
@@ -417,6 +416,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
return &mr->ibmr;
err_free_pl:
+ mr->ibmr.device = pd->device;
mlx4_free_priv_pages(mr);
err_free_mr:
(void) mlx4_mr_free(dev->dev, &mr->mmr);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index fcd04b881ec1..fc62a7ded734 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -172,6 +172,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
struct mlx5_ib_srq *srq;
struct mlx5_ib_wq *wq;
u16 wqe_ctr;
+ u8 roce_packet_type;
+ bool vlan_present;
u8 g;
if (qp->ibqp.srq || qp->ibqp.xrcd) {
@@ -223,7 +225,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
break;
}
wc->slid = be16_to_cpu(cqe->slid);
- wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
wc->dlid_path_bits = cqe->ml_path;
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -237,10 +238,22 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
wc->pkey_index = 0;
}
- if (ll != IB_LINK_LAYER_ETHERNET)
+ if (ll != IB_LINK_LAYER_ETHERNET) {
+ wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
return;
+ }
+
+ vlan_present = cqe->l4_l3_hdr_type & 0x1;
+ roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
+ if (vlan_present) {
+ wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
+ wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ } else {
+ wc->sl = 0;
+ }
- switch (wc->sl & 0x3) {
+ switch (roce_packet_type) {
case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
wc->network_hdr_type = RDMA_NETWORK_IB;
break;
@@ -1117,7 +1130,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
- umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+ /* check multiplication overflow */
+ if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+ return -EINVAL;
+
+ umem = ib_umem_get(context, ucmd.buf_addr,
+ (size_t)ucmd.cqe_size * entries,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5e29fbd3a5a0..d7da1dca765f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1313,7 +1313,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
/* need to protect from a race on closing the vma as part of
* mlx5_ib_vma_close.
*/
- down_read(&owning_mm->mmap_sem);
+ down_write(&owning_mm->mmap_sem);
list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
list) {
vma = vma_private->vma;
@@ -1323,11 +1323,12 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
/* context going to be destroyed, should
* not access ops any more.
*/
+ vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
vma->vm_ops = NULL;
list_del(&vma_private->list);
kfree(vma_private);
}
- up_read(&owning_mm->mmap_sem);
+ up_write(&owning_mm->mmap_sem);
mmput(owning_mm);
put_task_struct(owning_process);
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 0a260a06876d..7e466f031e30 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1648,6 +1648,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
MLX5_SET(mkc, mkc, umr_en, 1);
+ mr->ibmr.device = pd->device;
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err)
goto err_destroy_psv;
@@ -1820,7 +1821,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
mr->ibmr.length = 0;
- mr->ndescs = sg_nents;
for_each_sg(sgl, sg, sg_nents, i) {
if (unlikely(i >= mr->max_descs))
@@ -1832,6 +1832,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
sg_offset = 0;
}
+ mr->ndescs = i;
if (sg_offset_p)
*sg_offset_p = sg_offset;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index fdd156101a72..3cdcbfbd6a79 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -253,7 +253,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+ if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+ return -EINVAL;
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+ if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+ return -EINVAL;
qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
} else {
@@ -1130,7 +1134,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
ib_umem_release(sq->ubuffer.umem);
}
-static int get_rq_pas_size(void *qpc)
+static size_t get_rq_pas_size(void *qpc)
{
u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
@@ -1146,7 +1150,8 @@ static int get_rq_pas_size(void *qpc)
}
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq, void *qpin)
+ struct mlx5_ib_rq *rq, void *qpin,
+ size_t qpinlen)
{
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
@@ -1155,9 +1160,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
void *rqc;
void *wq;
void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
- int inlen;
+ size_t rq_pas_size = get_rq_pas_size(qpc);
+ size_t inlen;
int err;
- u32 rq_pas_size = get_rq_pas_size(qpc);
+
+ if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
+ return -EINVAL;
inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
in = mlx5_vzalloc(inlen);
@@ -1235,7 +1243,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
}
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- u32 *in,
+ u32 *in, size_t inlen,
struct ib_pd *pd)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1262,7 +1270,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->rq.wqe_cnt) {
rq->base.container_mibqp = qp;
- err = create_raw_packet_qp_rq(dev, rq, in);
+ err = create_raw_packet_qp_rq(dev, rq, in, inlen);
if (err)
goto err_destroy_sq;
@@ -1753,10 +1761,15 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->flags |= MLX5_IB_QP_LSO;
}
+ if (inlen < 0) {
+ err = -EINVAL;
+ goto err;
+ }
+
if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
- err = create_raw_packet_qp(dev, qp, in, pd);
+ err = create_raw_packet_qp(dev, qp, in, inlen, pd);
} else {
err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
}
@@ -1796,6 +1809,7 @@ err_create:
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
+err:
kvfree(in);
return err;
}
@@ -2154,18 +2168,18 @@ enum {
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
- if (rate == IB_RATE_PORT_CURRENT) {
+ if (rate == IB_RATE_PORT_CURRENT)
return 0;
- } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
+
+ if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
return -EINVAL;
- } else {
- while (rate != IB_RATE_2_5_GBPS &&
- !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
- MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
- --rate;
- }
- return rate + MLX5_STAT_RATE_OFFSET;
+ while (rate != IB_RATE_PORT_CURRENT &&
+ !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+ MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+ --rate;
+
+ return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
@@ -2838,7 +2852,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
* If we moved a kernel QP to RESET, clean up all old CQ
* entries and reinitialize the QP.
*/
- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+ if (new_state == IB_QPS_RESET &&
+ !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
ibqp->srq ? to_msrq(ibqp->srq) : NULL);
if (send_cq != recv_cq)
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index d61fd2c727c0..5c1dbe2f8757 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -243,8 +243,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_srq *srq;
- int desc_size;
- int buf_size;
+ size_t desc_size;
+ size_t buf_size;
int err;
struct mlx5_srq_attr in = {0};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
@@ -268,15 +268,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+ if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+ return ERR_PTR(-EINVAL);
desc_size = roundup_pow_of_two(desc_size);
- desc_size = max_t(int, 32, desc_size);
+ desc_size = max_t(size_t, 32, desc_size);
+ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+ return ERR_PTR(-EINVAL);
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
sizeof(struct mlx5_wqe_data_seg);
srq->msrq.wqe_shift = ilog2(desc_size);
buf_size = srq->msrq.max * desc_size;
- mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
- desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
- srq->msrq.max_avail_gather);
+ if (buf_size < desc_size)
+ return ERR_PTR(-EINVAL);
in.type = init_attr->srq_type;
if (pd->uobject)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 8bef09a8c49f..265943069b35 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -836,7 +836,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
dev->reset_stats.type = OCRDMA_RESET_STATS;
dev->reset_stats.dev = dev;
- if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
+ if (!debugfs_create_file("reset_stats", 0200, dev->dir,
&dev->reset_stats, &ocrdma_dbg_ops))
goto err;
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index 16c446142c2a..b0f09fb45c72 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -119,7 +119,7 @@ struct ib_ah *rvt_create_ah(struct ib_pd *pd,
spin_lock_irqsave(&dev->n_ahs_lock, flags);
if (dev->n_ahs_allocated == dev->dparms.props.max_ah) {
- spin_unlock(&dev->n_ahs_lock);
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
kfree(ah);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 6d9904a4a0ab..29dc5278d7be 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -197,7 +197,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
return ERR_PTR(-EINVAL);
/* Allocate the completion queue structure. */
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
if (!cq)
return ERR_PTR(-ENOMEM);
@@ -213,7 +213,9 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
else
sz += sizeof(struct ib_wc) * (entries + 1);
- wc = vmalloc_user(sz);
+ wc = udata ?
+ vmalloc_user(sz) :
+ vzalloc_node(sz, rdi->dparms.node);
if (!wc) {
ret = ERR_PTR(-ENOMEM);
goto bail_cq;
@@ -368,7 +370,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
else
sz += sizeof(struct ib_wc) * (cqe + 1);
- wc = vmalloc_user(sz);
+ wc = udata ?
+ vmalloc_user(sz) :
+ vzalloc_node(sz, rdi->dparms.node);
if (!wc)
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 8c0ddd7165ae..0d25dc84d294 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -471,8 +471,6 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
state = RESPST_ERR_LENGTH;
goto err;
}
-
- qp->resp.resid = mtu;
} else {
if (pktlen != resid) {
state = RESPST_ERR_LENGTH;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 59f37f412a7f..ced416f5dffb 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -747,9 +747,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
memcpy(wqe->dma.sge, ibwr->sg_list,
num_sge * sizeof(struct ib_sge));
- wqe->iova = (mask & WR_ATOMIC_MASK) ?
- atomic_wr(ibwr)->remote_addr :
- rdma_wr(ibwr)->remote_addr;
+ wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
+ mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
wqe->mask = mask;
wqe->dma.length = length;
wqe->dma.resid = length;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 335bd2c9e16e..34122c96522b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -974,6 +974,19 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
*/
priv->dev->broadcast[8] = priv->pkey >> 8;
priv->dev->broadcast[9] = priv->pkey & 0xff;
+
+ /*
+ * Update the broadcast address in the priv->broadcast object,
+ * if it already exists; otherwise no one else will do it.
+ */
+ if (priv->broadcast) {
+ spin_lock_irq(&priv->lock);
+ memcpy(priv->broadcast->mcmember.mgid.raw,
+ priv->dev->broadcast + 4,
+ sizeof(union ib_gid));
+ spin_unlock_irq(&priv->lock);
+ }
+
return 0;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 183db0cd849e..0df7d4504c06 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -799,6 +799,22 @@ static void path_rec_completion(int status,
spin_lock_irqsave(&priv->lock, flags);
if (!IS_ERR_OR_NULL(ah)) {
+ /*
+ * pathrec.dgid is used as the database key from the LLADDR;
+ * it must remain unchanged even if the SA returns a different
+ * GID to use in the AH.
+ */
+ if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid))) {
+ ipoib_dbg(
+ priv,
+ "%s got PathRec for gid %pI6 while asked for %pI6\n",
+ dev->name, pathrec->dgid.raw,
+ path->pathrec.dgid.raw);
+ memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
+ }
+
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -919,8 +935,8 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
- struct net_device *dev)
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
@@ -933,7 +949,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
- return;
+ return NULL;
+ }
+
+ /* To avoid a race condition, make sure that the
+ * neigh is added only once.
+ */
+ if (unlikely(!list_empty(&neigh->list))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return neigh;
}
path = __path_find(dev, daddr + 4);
@@ -971,7 +995,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
ipoib_neigh_put(neigh);
- return;
+ return NULL;
}
} else {
neigh->ah = NULL;
@@ -988,7 +1012,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
- return;
+ return NULL;
err_path:
ipoib_neigh_free(neigh);
@@ -998,6 +1022,8 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
+
+ return NULL;
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1103,8 +1129,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_TIPC):
neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, phdr->hwaddr, dev);
- return NETDEV_TX_OK;
+ neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+ if (likely(!neigh))
+ return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index cca1bb4fbfe3..982e97f52090 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -487,6 +487,9 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return -EINVAL;
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
rec.mgid = mcast->mcmember.mgid;
@@ -645,8 +648,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (mcast->backoff == 1 ||
time_after_eq(jiffies, mcast->delay_until)) {
/* Found the next unjoined group */
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
if (ipoib_mcast_join(dev, mcast)) {
spin_unlock_irq(&priv->lock);
return;
@@ -666,11 +667,9 @@ out:
queue_delayed_work(priv->wq, &priv->mcast_task,
delay_until - jiffies);
}
- if (mcast) {
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ if (mcast)
ipoib_mcast_join(dev, mcast);
- }
+
spin_unlock_irq(&priv->lock);
}
@@ -818,7 +817,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
if (!neigh) {
neigh = ipoib_neigh_alloc(daddr, dev);
- if (neigh) {
+ /* Make sure that the neigh is added to the
+ * mcast list only once.
+ */
+ if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
list_add_tail(&neigh->list, &mcast->neigh_list);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 0983470929bd..b879d21b7548 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2098,6 +2098,9 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
u32 rkey, offset;
int ret;
+ if (cmd->ctx_init_done)
+ goto rdma_ctx_post;
+
if (dir == DMA_FROM_DEVICE) {
addr = cmd->write_va;
rkey = cmd->write_stag;
@@ -2125,11 +2128,15 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
se_cmd->t_data_sg, se_cmd->t_data_nents,
offset, addr, rkey, dir);
}
+
if (ret < 0) {
isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
return ret;
}
+ cmd->ctx_init_done = true;
+
+rdma_ctx_post:
ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
if (ret < 0)
isert_err("Cmd: %p failed to post RDMA res\n", cmd);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c02ada57d7f5..85bd19753d55 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -124,6 +124,7 @@ struct isert_cmd {
struct rdma_rw_ctx rw;
struct work_struct comp_work;
struct scatterlist sg;
+ bool ctx_init_done;
};
static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 84f91858b5e6..463ea592a42a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2626,9 +2626,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
ret = FAST_IO_FAIL;
else
ret = FAILED;
- srp_free_req(ch, req, scmnd, 0);
- scmnd->result = DID_ABORT << 16;
- scmnd->scsi_done(scmnd);
+ if (ret == SUCCESS) {
+ srp_free_req(ch, req, scmnd, 0);
+ scmnd->result = DID_ABORT << 16;
+ scmnd->scsi_done(scmnd);
+ }
return ret;
}
@@ -3395,12 +3397,10 @@ static ssize_t srp_create_target(struct device *dev,
num_online_nodes());
const int ch_end = ((node_idx + 1) * target->ch_count /
num_online_nodes());
- const int cv_start = (node_idx * ibdev->num_comp_vectors /
- num_online_nodes() + target->comp_vector)
- % ibdev->num_comp_vectors;
- const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
- num_online_nodes() + target->comp_vector)
- % ibdev->num_comp_vectors;
+ const int cv_start = node_idx * ibdev->num_comp_vectors /
+ num_online_nodes();
+ const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
+ num_online_nodes();
int cpu_idx = 0;
for_each_online_cpu(cpu) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 29ab814693fc..876f438aa048 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2292,12 +2292,8 @@ static void srpt_queue_response(struct se_cmd *cmd)
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);
- if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
- || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
- atomic_inc(&ch->req_lim_delta);
- srpt_abort_cmd(ioctx);
+ if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
return;
- }
/* For read commands, transfer the data to the initiator. */
if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
@@ -2670,7 +2666,8 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
struct srpt_rdma_ch *ch = ioctx->ch;
unsigned long flags;
- WARN_ON(ioctx->state != SRPT_STATE_DONE);
+ WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
+ !(ioctx->cmd.transport_state & CMD_T_ABORTED));
if (ioctx->n_rw_ctx) {
srpt_free_rw_ctxs(ch, ioctx);
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 766bf2660116..5f04b2d94635 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler,
const struct input_device_id *id)
{
struct input_leds *leds;
+ struct input_led *led;
unsigned int num_leds;
unsigned int led_code;
int led_no;
@@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler,
led_no = 0;
for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
- struct input_led *led = &leds->leds[led_no];
+ if (!input_led_info[led_code].name)
+ continue;
+ led = &leds->leds[led_no];
led->handle = &leds->handle;
led->code = led_code;
- if (!input_led_info[led_code].name)
- continue;
-
led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
dev_name(&dev->dev),
input_led_info[led_code].name);
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 7f12b6579f82..795fa353de7c 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -216,8 +216,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
{
struct matrix_keypad *keypad = input_get_drvdata(dev);
+ spin_lock_irq(&keypad->lock);
keypad->stopped = true;
- mb();
+ spin_unlock_irq(&keypad->lock);
+
flush_work(&keypad->work.work);
/*
* matrix_keypad_scan() will leave IRQs enabled;
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 5a5778729e37..76bb51309a78 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -274,9 +274,18 @@ static const struct i2c_device_id qt1070_id[] = {
};
MODULE_DEVICE_TABLE(i2c, qt1070_id);
+#ifdef CONFIG_OF
+static const struct of_device_id qt1070_of_match[] = {
+ { .compatible = "qt1070", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qt1070_of_match);
+#endif
+
static struct i2c_driver qt1070_driver = {
.driver = {
.name = "qt1070",
+ .of_match_table = of_match_ptr(qt1070_of_match),
.pm = &qt1070_pm_ops,
},
.id_table = qt1070_id,
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 3048ef3e3e16..a5e8998047fe 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -189,8 +189,6 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
input_event(input, EV_MSC, MSC_SCAN, code);
input_report_key(input, keymap[code], state);
- /* Read for next loop */
- error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
} while (1);
input_sync(input);
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 930424e55439..251d64ca41ce 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
if (!haptics)
return -ENOMEM;
- haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+ haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
if (pdata) {
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index 603fc2fadf05..12b20840fb74 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -70,7 +70,7 @@ static int twl4030_pwrbutton_probe(struct platform_device *pdev)
pwr->phys = "twl4030_pwrbutton/input0";
pwr->dev.parent = &pdev->dev;
- err = devm_request_threaded_irq(&pwr->dev, irq, NULL, powerbutton_irq,
+ err = devm_request_threaded_irq(&pdev->dev, irq, NULL, powerbutton_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
IRQF_ONESHOT,
"twl4030_pwrbutton", pwr);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index af83d2e34913..a8a96def0ba2 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -2538,13 +2538,31 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
}
static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
- struct alps_data *priv)
+ struct alps_data *priv,
+ struct psmouse *psmouse)
{
bool is_dual = false;
+ int reg_val = 0;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
- if (IS_SS4PLUS_DEV(priv->dev_id))
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
is_dual = (otp[0][0] >> 4) & 0x01;
+ if (!is_dual) {
+ /* Support the TrackStick on ThinkPad L/E series */
+ if (alps_exit_command_mode(psmouse) == 0 &&
+ alps_enter_command_mode(psmouse) == 0) {
+ reg_val = alps_command_mode_read_reg(psmouse,
+ 0xD7);
+ }
+ alps_exit_command_mode(psmouse);
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+
+ if (reg_val == 0x0C || reg_val == 0x1D)
+ is_dual = true;
+ }
+ }
+
if (is_dual)
priv->flags |= ALPS_DUALPOINT |
ALPS_DUALPOINT_WITH_PRESSURE;
@@ -2567,7 +2585,7 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
alps_update_btn_info_ss4_v2(otp, priv);
- alps_update_dual_info_ss4_v2(otp, priv);
+ alps_update_dual_info_ss4_v2(otp, priv, psmouse);
return 0;
}
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index c9d491bc85e0..3851d5715772 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1082,6 +1082,13 @@ static int elan_probe(struct i2c_client *client,
return error;
}
+ /* Make sure there is something at this address */
+ error = i2c_smbus_read_byte(client);
+ if (error < 0) {
+ dev_dbg(&client->dev, "nothing at this address: %d\n", error);
+ return -ENXIO;
+ }
+
/* Initialize the touchpad. */
error = elan_initialize(data);
if (error)
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index a679e56c44cd..765879dcaf85 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -557,7 +557,14 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
long ret;
int error;
int len;
- u8 buffer[ETP_I2C_INF_LENGTH];
+ u8 buffer[ETP_I2C_REPORT_LEN];
+
+ len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
+ if (len != ETP_I2C_REPORT_LEN) {
+ error = len < 0 ? len : -EIO;
+ dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
+ error, len);
+ }
reinit_completion(completion);
enable_irq(client->irq);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 59603a5728f7..c519c0b09568 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1711,6 +1711,17 @@ int elantech_init(struct psmouse *psmouse)
etd->samples[0], etd->samples[1], etd->samples[2]);
}
+ if (etd->samples[1] == 0x74 && etd->hw_version == 0x03) {
+ /*
+ * This module has a bug which makes absolute mode
+ * unusable, so bail out and fall back to the standard
+ * PS/2 protocol.
+ */
+ psmouse_info(psmouse,
+ "absolute mode broken, forcing standard PS/2 protocol\n");
+ goto init_fail;
+ }
+
if (elantech_set_absolute_mode(psmouse)) {
psmouse_err(psmouse,
"failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index b604564dec5c..30328e57fdda 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -15,6 +15,7 @@
#define MOUSEDEV_MINORS 31
#define MOUSEDEV_MIX 63
+#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
@@ -103,7 +104,7 @@ struct mousedev_client {
spinlock_t packet_lock;
int pos_x, pos_y;
- signed char ps2[6];
+ u8 ps2[6];
unsigned char ready, buffer, bufsiz;
unsigned char imexseq, impsseq;
enum mousedev_emul mode;
@@ -291,11 +292,10 @@ static void mousedev_notify_readers(struct mousedev *mousedev,
}
client->pos_x += packet->dx;
- client->pos_x = client->pos_x < 0 ?
- 0 : (client->pos_x >= xres ? xres : client->pos_x);
+ client->pos_x = clamp_val(client->pos_x, 0, xres);
+
client->pos_y += packet->dy;
- client->pos_y = client->pos_y < 0 ?
- 0 : (client->pos_y >= yres ? yres : client->pos_y);
+ client->pos_y = clamp_val(client->pos_y, 0, yres);
p->dx += packet->dx;
p->dy += packet->dy;
@@ -571,44 +571,50 @@ static int mousedev_open(struct inode *inode, struct file *file)
return error;
}
-static inline int mousedev_limit_delta(int delta, int limit)
-{
- return delta > limit ? limit : (delta < -limit ? -limit : delta);
-}
-
-static void mousedev_packet(struct mousedev_client *client,
- signed char *ps2_data)
+static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data)
{
struct mousedev_motion *p = &client->packets[client->tail];
+ s8 dx, dy, dz;
+
+ dx = clamp_val(p->dx, -127, 127);
+ p->dx -= dx;
+
+ dy = clamp_val(p->dy, -127, 127);
+ p->dy -= dy;
- ps2_data[0] = 0x08 |
- ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07);
- ps2_data[1] = mousedev_limit_delta(p->dx, 127);
- ps2_data[2] = mousedev_limit_delta(p->dy, 127);
- p->dx -= ps2_data[1];
- p->dy -= ps2_data[2];
+ ps2_data[0] = BIT(3);
+ ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2);
+ ps2_data[0] |= p->buttons & 0x07;
+ ps2_data[1] = dx;
+ ps2_data[2] = dy;
switch (client->mode) {
case MOUSEDEV_EMUL_EXPS:
- ps2_data[3] = mousedev_limit_delta(p->dz, 7);
- p->dz -= ps2_data[3];
- ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1);
+ dz = clamp_val(p->dz, -7, 7);
+ p->dz -= dz;
+
+ ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1);
client->bufsiz = 4;
break;
case MOUSEDEV_EMUL_IMPS:
- ps2_data[0] |=
- ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
- ps2_data[3] = mousedev_limit_delta(p->dz, 127);
- p->dz -= ps2_data[3];
+ dz = clamp_val(p->dz, -127, 127);
+ p->dz -= dz;
+
+ ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
+ ((p->buttons & 0x08) >> 1);
+ ps2_data[3] = dz;
+
client->bufsiz = 4;
break;
case MOUSEDEV_EMUL_PS2:
default:
- ps2_data[0] |=
- ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
p->dz = 0;
+
+ ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
+ ((p->buttons & 0x08) >> 1);
+
client->bufsiz = 3;
break;
}
@@ -714,7 +720,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
- signed char data[sizeof(client->ps2)];
+ u8 data[sizeof(client->ps2)];
int retval = 0;
if (!client->ready && !client->buffer && mousedev->exist &&
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index d1051e3ce819..e484ea2dc787 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -530,6 +530,20 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{ }
};
+static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
+ {
+ /*
+ * Sony Vaio VGN-CS series require MUX or the touch sensor
+ * buttons will disturb touchpad operation
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
+ },
+ },
+ { }
+};
+
/*
* On some Asus laptops, just running self tests cause problems.
*/
@@ -693,6 +707,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
},
},
{
+ /* Lenovo ThinkPad L460 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
+ },
+ },
+ {
/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
@@ -1223,6 +1244,9 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_nomux_table))
i8042_nomux = true;
+ if (dmi_check_system(i8042_dmi_forcemux_table))
+ i8042_nomux = false;
+
if (dmi_check_system(i8042_dmi_notimeout_table))
i8042_notimeout = true;
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index 71b5a634cf6d..e7bb155911d0 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -152,7 +152,7 @@ static int __maybe_unused ar1021_i2c_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ar1021_i2c_pm, ar1021_i2c_suspend, ar1021_i2c_resume);
static const struct i2c_device_id ar1021_i2c_id[] = {
- { "MICROCHIP_AR1021_I2C", 0 },
+ { "ar1021", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, ar1021_i2c_id);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index e5d185fe69b9..26132402fdf9 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3028,6 +3028,15 @@ static const struct dmi_system_id mxt_dmi_table[] = {
.driver_data = samus_platform_data,
},
{
+ /* Samsung Chromebook Pro */
+ .ident = "Samsung Chromebook Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
+ },
+ .driver_data = samus_platform_data,
+ },
+ {
/* Other Google Chromebooks */
.ident = "Chromebook",
.matches = {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 240b16f3ee97..5907fddcc966 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -778,8 +778,10 @@ static int __maybe_unused goodix_suspend(struct device *dev)
int error;
/* We need gpio pins to suspend/resume */
- if (!ts->gpiod_int || !ts->gpiod_rst)
+ if (!ts->gpiod_int || !ts->gpiod_rst) {
+ disable_irq(client->irq);
return 0;
+ }
wait_for_completion(&ts->firmware_loading_complete);
@@ -819,8 +821,10 @@ static int __maybe_unused goodix_resume(struct device *dev)
struct goodix_ts_data *ts = i2c_get_clientdata(client);
int error;
- if (!ts->gpiod_int || !ts->gpiod_rst)
+ if (!ts->gpiod_int || !ts->gpiod_rst) {
+ enable_irq(client->irq);
return 0;
+ }
/*
* Exit sleep mode by outputting HIGH level to INT pin
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 5d0cd51c6f41..a4b7b4c3d27b 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -455,6 +455,14 @@ static int tsc2007_probe(struct i2c_client *client,
tsc2007_stop(ts);
+ /* power down the chip (TSC2007_SETUP does not ACK on I2C) */
+ err = tsc2007_xfer(ts, PWRDOWN);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "Failed to setup chip: %d\n", err);
+ return err; /* usually, chip does not respond */
+ }
+
err = input_register_device(input_dev);
if (err) {
dev_err(&client->dev,
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index cb72e0011310..f846f0140a9d 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -127,6 +127,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
iommu->name);
dmar_free_hwirq(irq);
+ iommu->pr_irq = 0;
goto err;
}
dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
@@ -142,9 +143,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);
- free_irq(iommu->pr_irq, iommu);
- dmar_free_hwirq(iommu->pr_irq);
- iommu->pr_irq = 0;
+ if (iommu->pr_irq) {
+ free_irq(iommu->pr_irq, iommu);
+ dmar_free_hwirq(iommu->pr_irq);
+ iommu->pr_irq = 0;
+ }
free_pages((unsigned long)iommu->prq, PRQ_ORDER);
iommu->prq = NULL;
@@ -386,6 +389,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
pasid_max - 1, GFP_KERNEL);
if (ret < 0) {
kfree(svm);
+ kfree(sdev);
goto out;
}
svm->pasid = ret;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 359d5d169ec0..1c5a4cc1f0c6 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -139,7 +139,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
break; /* found a free slot */
}
adjust_limit_pfn:
- limit_pfn = curr_iova->pfn_lo - 1;
+ limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
move_left:
prev = curr;
curr = rb_prev(curr);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index e2583cce2cc1..54556713c8d1 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1299,6 +1299,7 @@ static int __init omap_iommu_init(void)
const unsigned long flags = SLAB_HWCACHE_ALIGN;
size_t align = 1 << 10; /* L2 pagetable alignement */
struct device_node *np;
+ int ret;
np = of_find_matching_node(NULL, omap_iommu_of_match);
if (!np)
@@ -1312,11 +1313,25 @@ static int __init omap_iommu_init(void)
return -ENOMEM;
iopte_cachep = p;
- bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
-
omap_iommu_debugfs_init();
- return platform_driver_register(&omap_iommu_driver);
+ ret = platform_driver_register(&omap_iommu_driver);
+ if (ret) {
+ pr_err("%s: failed to register driver\n", __func__);
+ goto fail_driver;
+ }
+
+ ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
+ if (ret)
+ goto fail_bus;
+
+ return 0;
+
+fail_bus:
+ platform_driver_unregister(&omap_iommu_driver);
+fail_driver:
+ kmem_cache_destroy(iopte_cachep);
+ return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 9ae71804b5dd..1c2ca8d51a70 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -21,6 +21,8 @@
#include "irq-gic-common.h"
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
static const struct gic_kvm_info *gic_kvm_info;
const struct gic_kvm_info *gic_get_kvm_info(void)
@@ -52,11 +54,13 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
u32 confoff = (irq / 16) * 4;
u32 val, oldval;
int ret = 0;
+ unsigned long flags;
/*
* Read current configuration register, and insert the config
* for "irq", depending on "type".
*/
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
if (type & IRQ_TYPE_LEVEL_MASK)
val &= ~confmask;
@@ -64,8 +68,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
val |= confmask;
/* If the current configuration is the same, then we are done */
- if (val == oldval)
+ if (val == oldval) {
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
return 0;
+ }
/*
* Write back the new configuration, and possibly re-enable
@@ -83,6 +89,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
pr_warn("GIC: PPI%d is secure or misconfigured\n",
irq - 16);
}
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
if (sync_access)
sync_access();
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index acb9d250a905..ac15e5d5d9b2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -684,7 +684,7 @@ static struct irq_chip its_irq_chip = {
* This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
*/
#define IRQS_PER_CHUNK_SHIFT 5
-#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT)
+#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
@@ -1320,11 +1320,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
- * At least one bit of EventID is being used, hence a minimum
- * of two entries. No, the architecture doesn't let you
- * express an ITT with a single entry.
+ * We allocate at least one chunk's worth of LPIs per device,
+ * and thus that many ITEs. The device may require fewer, though.
*/
- nr_ites = max(2UL, roundup_pow_of_two(nvecs));
+ nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index a37576a1798d..100c80e48190 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -616,7 +616,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
* Ensure that stores to Normal memory are visible to the
* other CPUs before issuing the IPI.
*/
- smp_wmb();
+ wmb();
for_each_cpu(cpu, mask) {
unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
@@ -1250,6 +1250,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
void __iomem *redist_base;
+ /* A GICC entry with !ACPI_MADT_ENABLED is not usable, so skip it */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
redist_base = ioremap(gicc->gicr_base_address, size);
if (!redist_base)
return -ENOMEM;
@@ -1299,6 +1303,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
return 0;
+ /*
+ * It's perfectly valid for firmware to pass a disabled GICC entry; the driver
+ * should not treat it as an error, so skip the entry instead of failing the probe.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
return -ENODEV;
}
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 03b79b061d24..05d87f60d929 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -105,10 +105,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
u32 *mask, u32 *addr)
{
- unsigned int ofst;
-
- hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP;
- ofst = hwirq / 32 * 4;
+ unsigned int ofst = (hwirq / 32) * 4;
*mask = 1 << (hwirq % 32);
*addr = ofst + REG_MBIGEN_CLEAR_OFFSET;
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index d74374f25392..abf696b49dd7 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -55,6 +55,7 @@ static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
+DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
static void __gic_irq_dispatch(void);
@@ -746,17 +747,17 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
return gic_setup_dev_chip(d, virq, spec->hwirq);
} else {
- base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
+ base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
if (base_hwirq == gic_shared_intrs) {
return -ENOMEM;
}
/* check that we have enough space */
for (i = base_hwirq; i < nr_irqs; i++) {
- if (!test_bit(i, ipi_resrv))
+ if (!test_bit(i, ipi_available))
return -EBUSY;
}
- bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);
+ bitmap_clear(ipi_available, base_hwirq, nr_irqs);
/* map the hwirq for each cpu consecutively */
i = 0;
@@ -787,7 +788,7 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
return 0;
error:
- bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
+ bitmap_set(ipi_available, base_hwirq, nr_irqs);
return ret;
}
@@ -802,7 +803,7 @@ void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
return;
base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
- bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
+ bitmap_set(ipi_available, base_hwirq, nr_irqs);
}
int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
@@ -1066,6 +1067,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
2 * gic_vpes);
}
+ bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
gic_basic_init();
}
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 9cb4b621fbc3..b92a19a594a1 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -72,7 +72,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
if (sk->sk_state != MISDN_BOUND)
continue;
if (!cskb)
- cskb = skb_copy(skb, GFP_KERNEL);
+ cskb = skb_copy(skb, GFP_ATOMIC);
if (!cskb) {
printk(KERN_WARNING "%s no skb\n", __func__);
break;
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 3bce44893021..454ed4dc6ec1 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -188,6 +188,7 @@ void led_blink_set(struct led_classdev *led_cdev,
{
del_timer_sync(&led_cdev->blink_timer);
+ led_cdev->flags &= ~LED_BLINK_SW;
led_cdev->flags &= ~LED_BLINK_ONESHOT;
led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 840401ae9a4e..f6726484a8a1 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -266,7 +266,7 @@ static int pca955x_probe(struct i2c_client *client,
"slave address 0x%02x\n",
id->name, chip->bits, client->addr);
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
if (pdata) {
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
index a52674327857..8988ba3b2d65 100644
--- a/drivers/leds/leds-pm8058.c
+++ b/drivers/leds/leds-pm8058.c
@@ -106,7 +106,7 @@ static int pm8058_led_probe(struct platform_device *pdev)
if (!led)
return -ENOMEM;
- led->ledtype = (u32)of_device_get_match_data(&pdev->dev);
+ led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev);
map = dev_get_regmap(pdev->dev.parent, NULL);
if (!map) {
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 537903bf9add..d23337e8c4ee 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -512,15 +512,21 @@ struct open_bucket {
/*
* We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
+ * write streams for better cache utilization: first we try to segregate
+ * flash-only volume write streams from those of cached devices, then we look
+ * for a bucket where the last write to it was sequential with the current
+ * write, and failing that we look for a bucket last used by the same task.
*
* The idea is if you've got multiple tasks pulling data into the cache at the
* same time, you'll get better cache utilization if you try to segregate their
* data and preserve locality.
*
- * For example, say you've starting Firefox at the same time you're copying a
+ * For example, dirty sectors of a flash-only volume are not reclaimable; if
+ * they are mixed with dirty sectors of a cached device, such buckets will stay
+ * marked dirty and won't be reclaimed, even though the dirty data of the
+ * cached device has been written back to the backing device.
+ *
+ * And say you've started Firefox at the same time you're copying a
* bunch of files. Firefox will likely end up being fairly hot and stay in the
* cache awhile, but the data you copied might not be; if you wrote all that
* data to the same buckets it'd get invalidated at the same time.
@@ -537,7 +543,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket *ret, *ret_task = NULL;
list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
+ if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+ UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+ continue;
+ else if (!bkey_cmp(&ret->key, search))
goto found;
else if (ret->last_write_point == write_point)
ret_task = ret;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 28ce342348a9..4af7cd423c71 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -892,6 +892,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ kthread_stop(dc->writeback_thread);
+ dc->writeback_thread = NULL;
+ }
+
memset(&dc->sb.set_uuid, 0, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
@@ -937,6 +943,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
uint32_t rtime = cpu_to_le32(get_seconds());
struct uuid_entry *u;
char buf[BDEVNAME_SIZE];
+ struct cached_dev *exist_dc, *t;
bdevname(dc->bdev, buf);
@@ -960,6 +967,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
return -EINVAL;
}
+ /* Check whether already attached */
+ list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+ if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+ pr_err("Tried to attach %s but duplicate UUID already attached",
+ buf);
+
+ return -EINVAL;
+ }
+ }
+
u = uuid_find(c, dc->sb.uuid);
if (u &&
@@ -1182,7 +1199,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
return;
err:
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+ pr_notice("error %s: %s", bdevname(bdev, name), err);
bcache_device_stop(&dc->disk);
}
@@ -1853,6 +1870,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
const char *err = NULL; /* must be set for any error case */
int ret = 0;
+ bdevname(bdev, name);
+
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
@@ -1863,11 +1882,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
get_page(sb_page);
- if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+ if (blk_queue_discard(bdev_get_queue(bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
if (ret != 0) {
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
else
@@ -1890,14 +1910,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
goto out;
}
- pr_info("registered cache device %s", bdevname(bdev, name));
+ pr_info("registered cache device %s", name);
out:
kobject_put(&ca->kobj);
err:
if (err)
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+ pr_notice("error %s: %s", name, err);
return ret;
}
@@ -1986,6 +2006,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (err)
goto err_close;
+ err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
@@ -2000,7 +2021,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
goto err_close;
if (register_cache(sb, sb_page, bdev, ca) != 0)
- goto err_close;
+ goto err;
}
out:
if (sb_page)
@@ -2013,7 +2034,7 @@ out:
err_close:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
- pr_info("error opening %s: %s", path, err);
+ pr_info("error %s: %s", path, err);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 0bf1a12e35fe..ee6045d6c0bb 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -302,6 +302,7 @@ static void do_region(int op, int op_flags, unsigned region,
special_cmd_max_sectors = q->limits.max_write_same_sectors;
if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
special_cmd_max_sectors == 0) {
+ atomic_inc(&io->count);
dec_count(io, region, -EOPNOTSUPP);
return;
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index a68c650aad11..b67414b5a64e 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1777,12 +1777,12 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
cmd == DM_LIST_VERSIONS_CMD)
return 0;
- if ((cmd == DM_DEV_CREATE_CMD)) {
+ if (cmd == DM_DEV_CREATE_CMD) {
if (!*param->name) {
DMWARN("name not supplied when creating device");
return -EINVAL;
}
- } else if ((*param->uuid && *param->name)) {
+ } else if (*param->uuid && *param->name) {
DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
return -EINVAL;
}
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index ba7edcdd09ce..fcc2b5746a9f 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1122,8 +1122,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
lock_comm(cinfo);
ret = __sendmsg(cinfo, &cmsg);
- if (ret)
+ if (ret) {
+ unlock_comm(cinfo);
return ret;
+ }
cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8ebf1b97e1d2..a7bc70334f0e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4826,8 +4826,10 @@ array_size_store(struct mddev *mddev, const char *buf, size_t len)
return err;
/* cluster raid doesn't support change array_sectors */
- if (mddev_is_clustered(mddev))
+ if (mddev_is_clustered(mddev)) {
+ mddev_unlock(mddev);
return -EINVAL;
+ }
if (strncmp(buf, "default", 7) == 0) {
if (mddev->pers)
@@ -8224,6 +8226,10 @@ static int remove_and_add_spares(struct mddev *mddev,
int removed = 0;
bool remove_some = false;
+ if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ /* Mustn't remove devices when resync thread is running */
+ return 0;
+
rdev_for_each(rdev, mddev) {
if ((this == NULL || rdev == this) &&
rdev->raid_disk >= 0 &&
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b19b551bb34b..6a7b9b1dcfe3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2704,6 +2704,11 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
+ /*
+ * In case freeze_array() is waiting for condition
+ * nr_pending == nr_queued + extra to be true.
+ */
+ wake_up(&conf->wait_barrier);
md_wakeup_thread(conf->mddev->thread);
} else {
if (test_bit(R10BIO_WriteError,
@@ -3676,6 +3681,7 @@ static int raid10_run(struct mddev *mddev)
if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
discard_supported = true;
+ first = 0;
}
if (mddev->queue) {
@@ -4084,6 +4090,7 @@ static int raid10_start_reshape(struct mddev *mddev)
diff = 0;
if (first || diff < min_offset_diff)
min_offset_diff = diff;
+ first = 0;
}
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8d2c9d70042e..724eb6fff81d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -110,8 +110,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
int i;
- local_irq_disable();
- spin_lock(conf->hash_locks);
+ spin_lock_irq(conf->hash_locks);
for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
spin_lock(&conf->device_lock);
@@ -121,9 +120,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
int i;
spin_unlock(&conf->device_lock);
- for (i = NR_STRIPE_HASH_LOCKS; i; i--)
- spin_unlock(conf->hash_locks + i - 1);
- local_irq_enable();
+ for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
+ spin_unlock(conf->hash_locks + i);
+ spin_unlock_irq(conf->hash_locks);
}
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
@@ -732,12 +731,11 @@ static bool is_full_stripe_write(struct stripe_head *sh)
static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{
- local_irq_disable();
if (sh1 > sh2) {
- spin_lock(&sh2->stripe_lock);
+ spin_lock_irq(&sh2->stripe_lock);
spin_lock_nested(&sh1->stripe_lock, 1);
} else {
- spin_lock(&sh1->stripe_lock);
+ spin_lock_irq(&sh1->stripe_lock);
spin_lock_nested(&sh2->stripe_lock, 1);
}
}
@@ -745,8 +743,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{
spin_unlock(&sh1->stripe_lock);
- spin_unlock(&sh2->stripe_lock);
- local_irq_enable();
+ spin_unlock_irq(&sh2->stripe_lock);
}
/* Only freshly new full stripe normal write stripe can be added to a batch list */
@@ -3393,9 +3390,20 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
BUG_ON(test_bit(R5_Wantread, &dev->flags));
BUG_ON(sh->batch_head);
+
+ /*
+ * In the raid6 case if the only non-uptodate disk is P
+ * then we already trusted P to compute the other failed
+ * drives. It is safe to compute rather than re-read P.
+ * In other cases we only compute blocks from failed
+ * devices, otherwise check/repair might fail to detect
+ * a real inconsistency.
+ */
+
if ((s->uptodate == disks - 1) &&
+ ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) ||
(s->failed && (disk_idx == s->failed_num[0] ||
- disk_idx == s->failed_num[1]))) {
+ disk_idx == s->failed_num[1])))) {
/* have disk failed, and we're requested to fetch it;
* do compute it
*/
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index b5b5b195ea7f..42ce88ceac66 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -779,6 +779,29 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * b
goto exit;
}
+ /*
+ * The CAM may need some time to settle down, or there might be a race
+ * between the CAM asserting DA, our write of HC, and our last check
+ * for DA: the CAM can assert DA just after we checked it but before
+ * we set HC. It might be a bug in the CAM to keep the FR bit set, the
+ * lower layer/HW communication might require a longer timeout, or the
+ * CAM may simply need more time internally. But this does happen in
+ * practice!
+ * We need to read the status from the HW again and handle it the same
+ * way as the previous check for DA.
+ */
+ status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
+ if (status < 0)
+ goto exit;
+
+ if (status & (STATUSREG_DA | STATUSREG_RE)) {
+ if (status & STATUSREG_DA)
+ dvb_ca_en50221_thread_wakeup(ca);
+
+ status = -EAGAIN;
+ goto exit;
+ }
+
/* send the amount of data */
if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0)
goto exit;
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index e0fe5bc9dbce..31f16105184c 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
* New users must use I2C client binding directly!
*/
struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
- struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
+ struct i2c_adapter *i2c,
+ struct i2c_adapter **tuner_i2c_adapter)
{
struct i2c_client *client;
struct i2c_board_info board_info;
- struct m88ds3103_platform_data pdata;
+ struct m88ds3103_platform_data pdata = {};
pdata.clk = cfg->clock;
pdata.i2c_wr_max = cfg->i2c_wr_max;
@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
case M88DS3103_CHIP_ID:
break;
default:
+ ret = -ENODEV;
+ dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
goto err_kfree;
}
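The pdata = {} change matters because the structure is copied into the i2c board_info below; any member the attach path does not assign explicitly would otherwise carry stack garbage into the probe. A tiny sketch of why zero-initialization is the safe default, using a hypothetical struct cfg:

#include <stdio.h>

struct cfg {
	int clock;
	int i2c_wr_max;
	int spare;		/* never assigned by the caller below */
};

int main(void)
{
	struct cfg pdata = { 0 };	/* zero the whole structure first */

	pdata.clock = 27000000;
	pdata.i2c_wr_max = 33;

	/* .spare is guaranteed to be 0 rather than stack garbage. */
	printf("%d %d %d\n", pdata.clock, pdata.i2c_wr_max, pdata.spare);
	return 0;
}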
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 20b4a659e2e4..0fe69f79a24d 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -14,6 +14,8 @@
* GNU General Public License for more details.
*/
+#include <linux/delay.h>
+
#include "si2168_priv.h"
static const struct dvb_frontend_ops si2168_ops;
@@ -378,6 +380,7 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
+ udelay(100);
memcpy(cmd.args, "\x85", 1);
cmd.wlen = 1;
cmd.rlen = 1;
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 142ae28803bb..d558ed3e59c6 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -420,11 +420,13 @@ static void cx25840_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
/* 6. */
cx25840_write(client, 0x115, 0x8c);
@@ -634,11 +636,13 @@ static void cx23885_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
/* Call the cx23888 specific std setup func, we no longer rely on
* the generic cx24840 func.
@@ -752,11 +756,13 @@ static void cx231xx_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
cx25840_std_setup(client);
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index 4bf2995e1cb8..8f85910eda5d 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -1033,7 +1033,7 @@ static int ov6650_probe(struct i2c_client *client,
priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
priv->colorspace = V4L2_COLORSPACE_JPEG;
- priv->clk = v4l2_clk_get(&client->dev, "mclk");
+ priv->clk = v4l2_clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
goto eclkget;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 26d999c812c9..0f572bff64f5 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -222,7 +222,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
u8 mask, u8 val)
{
- i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2);
+ i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1);
}
static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index 8aa726651630..90fcccc05b56 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -422,8 +422,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
bt878_num);
if (bt878_num >= BT878_MAX) {
printk(KERN_ERR "bt878: Too many devices inserted\n");
- result = -ENOMEM;
- goto fail0;
+ return -ENOMEM;
}
if (pci_enable_device(dev))
return -EIO;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index b4be47969b6b..e17d6b945c07 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -341,6 +341,17 @@ static void solo_stop_streaming(struct vb2_queue *q)
struct solo_dev *solo_dev = vb2_get_drv_priv(q);
solo_stop_thread(solo_dev);
+
+ spin_lock(&solo_dev->slock);
+ while (!list_empty(&solo_dev->vidq_active)) {
+ struct solo_vb2_buf *buf = list_entry(
+ solo_dev->vidq_active.next,
+ struct solo_vb2_buf, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock(&solo_dev->slock);
INIT_LIST_HEAD(&solo_dev->vidq_active);
}
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index c12209c701d3..390d708c807a 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2169,6 +2169,12 @@ static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier,
pxa_dma_stop_channels(pcdev);
pxa_camera_destroy_formats(pcdev);
+
+ if (pcdev->mclk_clk) {
+ v4l2_clk_unregister(pcdev->mclk_clk);
+ pcdev->mclk_clk = NULL;
+ }
+
video_unregister_device(&pcdev->vdev);
pcdev->sensor = NULL;
@@ -2495,7 +2501,13 @@ static int pxa_camera_remove(struct platform_device *pdev)
dma_release_channel(pcdev->dma_chans[1]);
dma_release_channel(pcdev->dma_chans[2]);
- v4l2_clk_unregister(pcdev->mclk_clk);
+ v4l2_async_notifier_unregister(&pcdev->notifier);
+
+ if (pcdev->mclk_clk) {
+ v4l2_clk_unregister(pcdev->mclk_clk);
+ pcdev->mclk_clk = NULL;
+ }
+
v4l2_device_unregister(&pcdev->v4l2_dev);
dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 30c148b9d65e..06e2cfd09855 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -83,7 +83,7 @@ static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
static void channel_swdemux_tsklet(unsigned long data)
{
struct channel_info *channel = (struct channel_info *)data;
- struct c8sectpfei *fei = channel->fei;
+ struct c8sectpfei *fei;
unsigned long wp, rp;
int pos, num_packets, n, size;
u8 *buf;
@@ -91,6 +91,8 @@ static void channel_swdemux_tsklet(unsigned long data)
if (unlikely(!channel || !channel->irec))
return;
+ fei = channel->fei;
+
wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index cd209dccff1b..8e2aa3f8e52f 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -596,6 +596,7 @@ int vsp1_drm_init(struct vsp1_device *vsp1)
pipe->bru = &vsp1->bru->entity;
pipe->lif = &vsp1->lif->entity;
pipe->output = vsp1->wpf[0];
+ pipe->output->pipe = pipe;
return 0;
}
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 57c713a4e1df..4ac1ff482a0b 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -509,7 +509,13 @@ static int __maybe_unused vsp1_pm_suspend(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
- vsp1_pipelines_suspend(vsp1);
+ /*
+ * When used as part of a display pipeline, the VSP is stopped and
+ * restarted explicitly by the DU.
+ */
+ if (!vsp1->drm)
+ vsp1_pipelines_suspend(vsp1);
+
pm_runtime_force_suspend(vsp1->dev);
return 0;
@@ -520,7 +526,13 @@ static int __maybe_unused vsp1_pm_resume(struct device *dev)
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
pm_runtime_force_resume(vsp1->dev);
- vsp1_pipelines_resume(vsp1);
+
+ /*
+ * When used as part of a display pipeline, the VSP is stopped and
+ * restarted explicitly by the DU.
+ */
+ if (!vsp1->drm)
+ vsp1_pipelines_resume(vsp1);
return 0;
}
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index d351b9c768d2..743aa0febc09 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -792,6 +792,7 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vsp1_video *video = vb2_get_drv_priv(vq);
struct vsp1_pipeline *pipe = video->rwpf->pipe;
+ bool start_pipeline = false;
unsigned long flags;
int ret;
@@ -802,11 +803,23 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
mutex_unlock(&pipe->lock);
return ret;
}
+
+ start_pipeline = true;
}
pipe->stream_count++;
mutex_unlock(&pipe->lock);
+ /*
+ * vsp1_pipeline_ready() is not sufficient to establish that all streams
+ * are prepared and the pipeline is configured, as multiple streams
+ * can race through streamon with buffers already queued, so we
+ * don't even attempt to start the pipeline until the last stream has
+ * called through here.
+ */
+ if (!start_pipeline)
+ return 0;
+
spin_lock_irqsave(&pipe->irqlock, flags);
if (vsp1_pipeline_ready(pipe))
vsp1_video_pipeline_run(pipe);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index db525cdfac88..d9f88a4a96bd 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1381,8 +1381,13 @@ static int mceusb_dev_probe(struct usb_interface *intf,
goto rc_dev_fail;
/* wire up inbound data handler */
- usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
- mceusb_dev_recv, ir, ep_in->bInterval);
+ if (usb_endpoint_xfer_int(ep_in))
+ usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir, ep_in->bInterval);
+ else
+ usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir);
+
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 9caea8344547..d793c630f1dd 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -812,7 +812,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
struct camera_data *cam = video_drvdata(file);
if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- buf->index > cam->num_frames)
+ buf->index >= cam->num_frames)
return -EINVAL;
buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
@@ -863,7 +863,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
buf->memory != V4L2_MEMORY_MMAP ||
- buf->index > cam->num_frames)
+ buf->index >= cam->num_frames)
return -EINVAL;
DBG("QBUF #%d\n", buf->index);
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
index e546b014d7ad..2dcc8d0be9e7 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/media/usb/stkwebcam/stk-sensor.c
@@ -228,7 +228,7 @@
static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
{
int i = 0;
- int tmpval = 0;
+ u8 tmpval = 0;
if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
return 1;
@@ -253,7 +253,7 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
{
int i = 0;
- int tmpval = 0;
+ u8 tmpval = 0;
if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
return 1;
@@ -274,7 +274,7 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval))
return 1;
- *val = (u8) tmpval;
+ *val = tmpval;
return 0;
}
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 22a9aae16291..1c48f2f1e14a 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -144,7 +144,7 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
return 0;
}
-int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
+int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
{
struct usb_device *udev = dev->udev;
unsigned char *buf;
@@ -163,7 +163,7 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
sizeof(u8),
500);
if (ret >= 0)
- memcpy(value, buf, sizeof(u8));
+ *value = *buf;
kfree(buf);
return ret;
@@ -171,9 +171,10 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
static int stk_start_stream(struct stk_camera *dev)
{
- int value;
+ u8 value;
int i, ret;
- int value_116, value_117;
+ u8 value_116, value_117;
+
if (!is_present(dev))
return -ENODEV;
@@ -213,7 +214,7 @@ static int stk_start_stream(struct stk_camera *dev)
static int stk_stop_stream(struct stk_camera *dev)
{
- int value;
+ u8 value;
int i;
if (is_present(dev)) {
stk_camera_read_reg(dev, 0x0100, &value);
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
index 9bbfa3d9bfdd..92bb48e3c74e 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/media/usb/stkwebcam/stk-webcam.h
@@ -129,7 +129,7 @@ struct stk_camera {
#define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
int stk_camera_write_reg(struct stk_camera *, u16, u8);
-int stk_camera_read_reg(struct stk_camera *, u16, int *);
+int stk_camera_read_reg(struct stk_camera *, u16, u8 *);
int stk_sensor_init(struct stk_camera *);
int stk_sensor_configure(struct stk_camera *);
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index 0324633ede42..e56a49a5e8b1 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -109,6 +109,8 @@ static int usbtv_probe(struct usb_interface *intf,
return 0;
usbtv_audio_fail:
+ /* we must not free at this point */
+ usb_get_dev(usbtv->udev);
usbtv_video_free(usbtv);
usbtv_video_fail:
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 48a39222fdf9..a9fc64557c53 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -101,7 +101,7 @@ static int get_v4l2_window32(struct v4l2_window __user *kp,
static int put_v4l2_window32(struct v4l2_window __user *kp,
struct v4l2_window32 __user *up)
{
- struct v4l2_clip __user *kclips = kp->clips;
+ struct v4l2_clip __user *kclips;
struct v4l2_clip32 __user *uclips;
compat_caddr_t p;
u32 clipcount;
@@ -116,6 +116,8 @@ static int put_v4l2_window32(struct v4l2_window __user *kp,
if (!clipcount)
return 0;
+ if (get_user(kclips, &kp->clips))
+ return -EFAULT;
if (get_user(p, &up->clips))
return -EFAULT;
uclips = compat_ptr(p);
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 1db0af6c7f94..b6189a4958c5 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
- err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+ err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
flags, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
+ dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+ dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
return 0;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 9ccf7f5e0e2e..4299ce06c25b 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -334,6 +334,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
struct vb2_buffer *vb;
int ret;
+ /* Ensure that q->num_buffers + num_buffers does not exceed VB2_MAX_FRAME */
+ num_buffers = min_t(unsigned int, num_buffers,
+ VB2_MAX_FRAME - q->num_buffers);
+
for (buffer = 0; buffer < num_buffers; ++buffer) {
/* Allocate videobuf buffer structures */
vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
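The added min_t() keeps q->num_buffers + num_buffers from ever exceeding VB2_MAX_FRAME. A user-space sketch of the same clamp, with MAX_FRAME and the counters as stand-ins (and assuming allocated never exceeds MAX_FRAME):

#include <stdio.h>

#define MAX_FRAME 32	/* stand-in for VB2_MAX_FRAME */

/* Clamp a request so allocated + requested never exceeds MAX_FRAME
 * (callers guarantee allocated <= MAX_FRAME). */
static unsigned int clamp_request(unsigned int allocated, unsigned int requested)
{
	unsigned int room = MAX_FRAME - allocated;

	return requested < room ? requested : room;
}

int main(void)
{
	printf("%u\n", clamp_request(30, 8));	/* 2: only two slots left */
	printf("%u\n", clamp_request(0, 8));	/* 8: plenty of room */
	return 0;
}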
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ee1667acde4..00dff9b5a6c4 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
+ .no_write_same = 1,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 8f8bacb67a15..a6b5259ffbdd 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -430,6 +430,20 @@ static void palmas_power_off(void)
{
unsigned int addr;
int ret, slave;
+ struct device_node *np = palmas_dev->dev->of_node;
+
+ if (of_property_read_bool(np, "ti,palmas-override-powerhold")) {
+ addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
+ PALMAS_PRIMARY_SECONDARY_PAD2);
+ slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
+
+ ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
+ PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
+ if (ret)
+ dev_err(palmas_dev->dev,
+ "Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
+ ret);
+ }
if (!palmas_dev)
return;
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 31983366090a..2bf79ba4a39e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -61,6 +61,8 @@ lkdtm-$(CONFIG_LKDTM) += lkdtm_perms.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o
+KCOV_INSTRUMENT_lkdtm_rodata.o := n
+
OBJCOPYFLAGS :=
OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
--set-section-flags .text=alloc,readonly \
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index c63d61e17d56..381a9a166f93 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -401,8 +401,10 @@ static int device_open(struct inode *inode, struct file *file)
if (down_interruptible(&sem) != 0)
return -EPERM;
- if (!(adapter = get_cxl_adapter(adapter_num)))
- return -ENODEV;
+ if (!(adapter = get_cxl_adapter(adapter_num))) {
+ rc = -ENODEV;
+ goto err_unlock;
+ }
file->private_data = adapter;
continue_token = 0;
@@ -446,6 +448,8 @@ err1:
free_page((unsigned long) le);
err:
put_device(&adapter->dev);
+err_unlock:
+ up(&sem);
return rc;
}
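The new err_unlock label makes sure the semaphore taken at the top of device_open() is also released on the added failure path. A reduced user-space sketch of that acquire-once, funnel-every-failure-through-a-label discipline, with a pthread mutex standing in for the semaphore and get_adapter() as a stand-in lookup; the real function keeps the adapter and more state alive than shown here:

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static pthread_mutex_t sem = PTHREAD_MUTEX_INITIALIZER;

struct adapter { int id; };
static struct adapter adapters[2] = { { 0 }, { 1 } };

/* Stand-in for get_cxl_adapter(): NULL when the number is out of range. */
static struct adapter *get_adapter(int num)
{
	return (num >= 0 && num < 2) ? &adapters[num] : NULL;
}

static int device_open_sketch(int adapter_num, struct adapter **out)
{
	struct adapter *adapter;
	int rc = 0;

	if (pthread_mutex_lock(&sem))
		return -EPERM;

	adapter = get_adapter(adapter_num);
	if (!adapter) {
		rc = -ENODEV;
		goto err_unlock;	/* every failure path drops the lock */
	}
	*out = adapter;

err_unlock:
	pthread_mutex_unlock(&sem);
	return rc;
}

int main(void)
{
	struct adapter *a = NULL;

	printf("%d\n", device_open_sketch(0, &a));	/* 0: success */
	printf("%d\n", device_open_sketch(9, &a));	/* -ENODEV, lock released */
	return 0;
}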
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index cc91f7b3d90c..eb29113e0bac 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -148,7 +148,7 @@ enclosure_register(struct device *dev, const char *name, int components,
for (i = 0; i < components; i++) {
edev->component[i].number = -1;
edev->component[i].slot = -1;
- edev->component[i].power_status = 1;
+ edev->component[i].power_status = -1;
}
mutex_lock(&container_list_lock);
@@ -600,6 +600,11 @@ static ssize_t get_component_power_status(struct device *cdev,
if (edev->cb->get_power_status)
edev->cb->get_power_status(edev, ecomp);
+
+ /* If still uninitialized, the callback failed or does not exist. */
+ if (ecomp->power_status == -1)
+ return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
+
return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
}
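power_status now starts at the sentinel value -1; if it is still -1 after the callback runs, the sysfs read can tell a failed callback (EIO) apart from a missing one (ENOTTY). A compact sketch of that sentinel pattern with stand-in types:

#include <errno.h>
#include <stdio.h>

struct component {
	int power_status;		/* -1 = not reported yet */
};

struct enclosure {
	int (*get_power_status)(struct component *c);
};

static int read_power_status(struct enclosure *e, struct component *c)
{
	if (e->get_power_status)
		e->get_power_status(c);

	/* Still the sentinel: the callback failed or does not exist. */
	if (c->power_status == -1)
		return e->get_power_status ? -EIO : -ENOTTY;

	return c->power_status;
}

int main(void)
{
	struct enclosure e = { 0 };			/* no callback registered */
	struct component c = { .power_status = -1 };

	printf("%d\n", read_power_status(&e, &c));	/* -ENOTTY */
	return 0;
}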
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 41f318631c6d..60f5a8ded8dd 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -551,7 +551,6 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
break;
default:
- dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
rets = -ENOIOCTLCMD;
}
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f84a4275ca29..f735ab4ba84e 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -298,8 +298,11 @@ static void *qp_alloc_queue(u64 size, u32 flags)
size_t pas_size;
size_t vas_size;
size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
- const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ u64 num_pages;
+ if (size > SIZE_MAX - PAGE_SIZE)
+ return NULL;
+ num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages >
(SIZE_MAX - queue_size) /
(sizeof(*queue->kernel_if->u.g.pas) +
@@ -624,9 +627,12 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
struct vmci_queue *queue;
size_t queue_page_size;
- const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ u64 num_pages;
const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
+ if (size > SIZE_MAX - PAGE_SIZE)
+ return NULL;
+ num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages > (SIZE_MAX - queue_size) /
sizeof(*queue->kernel_if->u.h.page))
return NULL;
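Both allocation paths now reject sizes large enough to make DIV_ROUND_UP(size, PAGE_SIZE) + 1 wrap before doing the arithmetic. A small sketch of the guard, with UINT64_MAX standing in for the kernel's SIZE_MAX comparison:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Pages needed for a queue of 'size' bytes plus one header page,
 * refusing sizes that would make the rounding-up addition wrap. */
static bool queue_num_pages(uint64_t size, uint64_t *num_pages)
{
	if (size > UINT64_MAX - PAGE_SIZE)
		return false;

	*num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	return true;
}

int main(void)
{
	uint64_t n;

	if (queue_num_pages(10000, &n))
		printf("%llu pages\n", (unsigned long long)n);	/* 4 pages */
	if (!queue_num_pages(UINT64_MAX, &n))
		printf("rejected oversized queue\n");
	return 0;
}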
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 2553d903a82b..cff5829790c9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2974,6 +2974,14 @@ static int mmc_pm_notify(struct notifier_block *notify_block,
if (!err)
break;
+ if (!mmc_card_is_removable(host)) {
+ dev_warn(mmc_dev(host),
+ "pre_suspend failed for non-removable host: "
+ "%d\n", err);
+ /* Avoid removing non-removable hosts */
+ break;
+ }
+
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index f81f4175f49a..d382dbd44635 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -490,6 +490,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
(sizeof(struct idmac_desc_64addr) *
(i + 1))) >> 32;
/* Initialize reserved and buffer size fields to "0" */
+ p->des0 = 0;
p->des1 = 0;
p->des2 = 0;
p->des3 = 0;
@@ -512,6 +513,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
i++, p++) {
p->des3 = cpu_to_le32(host->sg_dma +
(sizeof(struct idmac_desc) * (i + 1)));
+ p->des0 = 0;
p->des1 = 0;
}
@@ -2878,8 +2880,8 @@ static bool dw_mci_reset(struct dw_mci *host)
}
if (host->use_dma == TRANS_MODE_IDMAC)
- /* It is also recommended that we reset and reprogram idmac */
- dw_mci_idmac_reset(host);
+ /* It is also required that we reinit idmac */
+ dw_mci_idmac_init(host);
ret = true;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 684087db170b..1752007397f9 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -368,9 +368,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
host->irq_mask &= ~irq;
else
host->irq_mask |= irq;
- spin_unlock_irqrestore(&host->lock, flags);
writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
+ spin_unlock_irqrestore(&host->lock, flags);
}
static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5f2f24a7360d..a082aa330f47 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1762,8 +1762,8 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
*/
if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
struct pinctrl *p = devm_pinctrl_get(host->dev);
- if (!p) {
- ret = -ENODEV;
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
goto err_free_irq;
}
if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 3c27401cf7fe..a51d636c2312 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -432,6 +432,20 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
if (esdhc->vendor_ver < VENDOR_V_23)
pre_div = 2;
+ /*
+ * Limit SD clock to 167MHz for ls1046a according to its datasheet
+ */
+ if (clock > 167000000 &&
+ of_find_compatible_node(NULL, NULL, "fsl,ls1046a-esdhc"))
+ clock = 167000000;
+
+ /*
+ * Limit SD clock to 125MHz for ls1012a according to its datasheet
+ */
+ if (clock > 125000000 &&
+ of_find_compatible_node(NULL, NULL, "fsl,ls1012a-esdhc"))
+ clock = 125000000;
+
/* Workaround to reduce the clock frequency for p1010 esdhc */
if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
if (clock > 20000000)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index b0b9ceb0ab01..cfb794766fea 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -492,6 +492,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
+ /* Advertise 2.0v for compatibility with the SDIO card's OCR */
+ slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
MMC_CAP_POWER_OFF_CARD;
break;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 7d275e72903a..44ea9d88651f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1404,6 +1404,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
if (mode != MMC_POWER_OFF) {
switch (1 << vdd) {
case MMC_VDD_165_195:
+ /*
+ * Without a regulator, SDHCI does not support 2.0v
+ * so we only get here if the driver deliberately
+ * added the 2.0v range to ocr_avail. Map it to 1.8v
+ * for the purpose of turning on the power.
+ */
+ case MMC_VDD_20_21:
pwr = SDHCI_POWER_180;
break;
case MMC_VDD_29_30:
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5e1b68cbcd0a..e1b603ca0170 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
+#define M28F00AP30 0x8963
/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
extp->MinorVersion = '1';
}
+static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
+{
+ /*
+ * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy w.r.t.
+ * Erase Suspend for their small erase blocks (0x8000)
+ */
+ if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
+ return 1;
+ return 0;
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
@@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
(mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
goto sleep;
+ /* Do not allow suspend if the read/write targets the block being erased */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
+
+ /* do not suspend small EBs, buggy Micron Chips */
+ if (cfi_is_micron_28F00AP30(cfi, chip) &&
+ (chip->in_progress_block_mask == ~(0x8000-1)))
+ goto sleep;
/* Erase suspend */
- map_write(map, CMD(0xB0), adr);
+ map_write(map, CMD(0xB0), chip->in_progress_block_addr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
- status = map_read(map, adr);
+ status = map_read(map, chip->in_progress_block_addr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
@@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
- map_write(map, CMD(0xd0), adr);
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0xd0), chip->in_progress_block_addr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
@@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 9dca881bb378..107c05b3ddbb 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -812,9 +812,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;
- /* We could check to see if we're trying to access the sector
- * that is currently being erased. However, no user will try
- * anything like that so we just wait for the timeout. */
+ /* Do not allow suspend if the read/write targets the block being erased */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
/* Erase suspend */
/* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2263,6 +2264,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(map->size - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map->size,
@@ -2352,6 +2354,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, len,
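Both command sets now record in_progress_block_addr and in_progress_block_mask when an erase starts, and refuse to issue Erase Suspend when the incoming access falls inside that same block. The containment test is a plain mask compare; a stand-alone sketch:

#include <stdbool.h>
#include <stdio.h>

/* True if 'adr' lies inside the erase block starting at 'block_addr',
 * where block_mask == ~(block_len - 1). */
static bool in_erasing_block(unsigned long adr, unsigned long block_addr,
			     unsigned long block_mask)
{
	return (adr & block_mask) == block_addr;
}

int main(void)
{
	unsigned long block_addr = 0x20000;		/* 128KiB block */
	unsigned long block_mask = ~(0x20000UL - 1);

	printf("%d\n", in_erasing_block(0x2abcd, block_addr, block_mask)); /* 1 */
	printf("%d\n", in_erasing_block(0x41000, block_addr, block_mask)); /* 0 */
	return 0;
}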
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 7c0b27d132b1..b479bd81120b 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
do {
uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
mask = (1 << (cfi->device_type * 8)) - 1;
+ if (ofs >= map->size)
+ return 0;
result = map_read(map, base + ofs);
bank++;
} while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 2a47a3f0e730..b4092eab53ac 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -487,7 +487,7 @@ static int shrink_ecclayout(struct mtd_info *mtd,
for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
u32 eccpos;
- ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
+ ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
if (ret < 0) {
if (ret != -ERANGE)
return ret;
@@ -534,7 +534,7 @@ static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
u32 eccpos;
- ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
+ ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
if (ret < 0) {
if (ret != -ERANGE)
return ret;
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 1a4a790054e4..ef9a6b22c9fa 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1763,7 +1763,7 @@ try_dmaread:
err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
addr);
/* erased page bitflips corrected */
- if (err > 0)
+ if (err >= 0)
return err;
}
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index d1570f512f0b..2f6b55229d5b 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -201,14 +201,9 @@ static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
/* returns nonzero if entire page is blank */
static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
- u32 *eccstat, unsigned int bufnum)
+ u32 eccstat, unsigned int bufnum)
{
- u32 reg = eccstat[bufnum / 4];
- int errors;
-
- errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
-
- return errors;
+ return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
}
/*
@@ -221,7 +216,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
- u32 eccstat[4];
+ u32 eccstat;
int i;
/* set the chip select for NAND Transaction */
@@ -256,19 +251,17 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
if (nctrl->eccread) {
int errors;
int bufnum = nctrl->page & priv->bufnum_mask;
- int sector = bufnum * chip->ecc.steps;
- int sector_end = sector + chip->ecc.steps - 1;
+ int sector_start = bufnum * chip->ecc.steps;
+ int sector_end = sector_start + chip->ecc.steps - 1;
__be32 *eccstat_regs;
- if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
- eccstat_regs = ifc->ifc_nand.v2_nand_eccstat;
- else
- eccstat_regs = ifc->ifc_nand.v1_nand_eccstat;
+ eccstat_regs = ifc->ifc_nand.nand_eccstat;
+ eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
- for (i = sector / 4; i <= sector_end / 4; i++)
- eccstat[i] = ifc_in32(&eccstat_regs[i]);
+ for (i = sector_start; i <= sector_end; i++) {
+ if (i != sector_start && !(i % 4))
+ eccstat = ifc_in32(&eccstat_regs[i / 4]);
- for (i = sector; i <= sector_end; i++) {
errors = check_read_ecc(mtd, ctrl, eccstat, i);
if (errors == 15) {
@@ -656,6 +649,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
u32 nand_fsr;
+ int status;
/* Use READ_STATUS command, but wait for the device to be ready */
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
@@ -670,12 +664,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
fsl_ifc_run_command(mtd);
nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
-
+ status = nand_fsr >> 24;
/*
* The chip always seems to report that it is
* write-protected, even when it is not.
*/
- return nand_fsr | NAND_STATUS_WP;
+ return status | NAND_STATUS_WP;
}
static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -907,6 +901,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
if (ctrl->version == FSL_IFC_VERSION_1_1_0)
fsl_ifc_sram_init(priv);
+ /*
+ * IFC version 2.0.0 has 16KB of internal SRAM, compared to 8KB on older
+ * versions, so the bufnum mask needs to be updated accordingly.
+ */
+ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
+
return 0;
}
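In the reworked check_read_ecc() the per-sector error count is pulled straight out of one 32-bit status word that packs four sectors, eight bits each, with sector (bufnum % 4) == 0 in the most significant byte; the command loop only re-reads the register when it crosses into the next group of four. A user-space sketch of that extraction:

#include <stdint.h>
#include <stdio.h>

/* One 32-bit ECC status word covers 4 sectors, 8 bits per sector, with
 * sector (bufnum % 4) == 0 in the most significant byte; the error count
 * lives in the low four bits of each field. */
static unsigned int ecc_errors(uint32_t eccstat, unsigned int bufnum)
{
	return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
}

int main(void)
{
	uint32_t stat = 0x020F0100;	/* sectors 0..3: 2, 15, 1, 0 errors */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("sector %u: %u errors\n", i, ecc_errors(stat, i));
	return 0;
}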
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 6c062b8251d2..d9dab4275859 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1059,9 +1059,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
}
- /* handle the block mark swapping */
- block_mark_swapping(this, payload_virt, auxiliary_virt);
-
/* Loop over status bytes, accumulating ECC status. */
status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
@@ -1150,6 +1147,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
+ /* handle the block mark swapping */
+ block_mark_swapping(this, buf, auxiliary_virt);
+
if (oob_required) {
/*
* It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
@@ -2047,18 +2047,20 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
ret = nand_boot_init(this);
if (ret)
- goto err_out;
+ goto err_nand_cleanup;
ret = chip->scan_bbt(mtd);
if (ret)
- goto err_out;
+ goto err_nand_cleanup;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
- goto err_out;
+ goto err_nand_cleanup;
return 0;
+err_nand_cleanup:
+ nand_cleanup(chip);
err_out:
- gpmi_nand_exit(this);
+ gpmi_free_dma_buffer(this);
return ret;
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 21c03086bb7f..5fb45161789c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -715,7 +715,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, readcmd, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
}
- chip->cmd_ctrl(mtd, command, ctrl);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command, ctrl);
/* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
@@ -744,6 +745,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
@@ -806,7 +808,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -842,6 +846,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_CACHEDPROG:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
@@ -4780,6 +4785,11 @@ int nand_scan_tail(struct mtd_info *mtd)
goto err_free;
}
ecc->total = ecc->steps * ecc->bytes;
+ if (ecc->total > mtd->oobsize) {
+ WARN(1, "Total number of ECC bytes exceeded oobsize\n");
+ ret = -EINVAL;
+ goto err_free;
+ }
/*
* The number of bytes available for a client to place data into
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 1cb3f7758fb6..766b2c385682 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != use_len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != mtd->oobavail) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
/* read entire block's OOB at one go */
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to start read past end of OOB\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err)
goto out;
if (memcmpshow(addr, readbuf, writebuf,
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 46913ef25bc0..479a5f02d10b 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -244,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
* in any case.
*/
if (mode & FMODE_WRITE) {
- ret = -EPERM;
+ ret = -EROFS;
goto out_unlock;
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 85d54f37e28f..6cb5ca52cb5a 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -894,6 +894,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
+ /*
+ * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
+ * MLC NAND is different and needs special care, otherwise UBI or UBIFS
+ * will die soon and you will lose all your data.
+ */
+ if (mtd->type == MTD_MLCNANDFLASH) {
+ pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 4f0bd6b4422a..69dd21679a30 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -362,7 +362,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
{
int i;
- flush_work(&ubi->fm_work);
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index c1f5c29e458e..b44c8d348e78 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -828,6 +828,24 @@ static int find_fm_anchor(struct ubi_attach_info *ai)
return ret;
}
+static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
+ struct ubi_ainf_peb *old)
+{
+ struct ubi_ainf_peb *new;
+
+ new = ubi_alloc_aeb(ai, old->pnum, old->ec);
+ if (!new)
+ return NULL;
+
+ new->vol_id = old->vol_id;
+ new->sqnum = old->sqnum;
+ new->lnum = old->lnum;
+ new->scrub = old->scrub;
+ new->copy_flag = old->copy_flag;
+
+ return new;
+}
+
/**
* ubi_scan_fastmap - scan the fastmap.
* @ubi: UBI device object
@@ -847,7 +865,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_fastmap_layout *fm;
- struct ubi_ainf_peb *tmp_aeb, *aeb;
+ struct ubi_ainf_peb *aeb;
int i, used_blocks, pnum, fm_anchor, ret = 0;
size_t fm_size;
__be32 crc, tmp_crc;
@@ -857,9 +875,16 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (fm_anchor < 0)
return UBI_NO_FASTMAP;
- /* Move all (possible) fastmap blocks into our new attach structure. */
- list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
- list_move_tail(&aeb->u.list, &ai->fastmap);
+ /* Copy all (possible) fastmap blocks into our new attach structure. */
+ list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
+ struct ubi_ainf_peb *new;
+
+ new = clone_aeb(ai, aeb);
+ if (!new)
+ return -ENOMEM;
+
+ list_add(&new->u.list, &ai->fastmap);
+ }
down_write(&ubi->fm_protect);
memset(ubi->fm_buf, 0, ubi->fm_size);
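ubi_scan_fastmap() now clones the candidate fastmap PEBs into the new attach structure instead of moving them, so the scan-time list stays usable if the fastmap later turns out to be bad. A small sketch of copy-instead-of-move on a plain singly linked list, with node and clone_node as stand-ins for the UBI helpers:

#include <stdlib.h>
#include <stdio.h>

struct node {
	int pnum;
	struct node *next;
};

static struct node *clone_node(const struct node *old)
{
	struct node *new = malloc(sizeof(*new));

	if (!new)
		return NULL;
	new->pnum = old->pnum;		/* copy the payload, leave 'old' alone */
	new->next = NULL;
	return new;
}

/* Prepend a clone of every entry of 'src' onto 'dst'; 'src' is untouched,
 * so the caller can still fall back to it if something goes wrong later. */
static int clone_list(struct node **dst, const struct node *src)
{
	for (; src; src = src->next) {
		struct node *new = clone_node(src);

		if (!new)
			return -1;
		new->next = *dst;
		*dst = new;
	}
	return 0;
}

int main(void)
{
	struct node a = { 1, NULL };
	struct node b = { 2, &a };	/* original list: 2 -> 1 */
	struct node *copy = NULL;

	if (clone_list(&copy, &b) == 0)
		printf("%d %d\n", copy->pnum, b.pnum);	/* 1 2: both lists live */
	return 0;
}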
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 7ac78c13dd1c..1bcb25bc35ef 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -265,6 +265,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->last_eb_bytes = vol->usable_leb_size;
}
+ /* Make volume "available" before it becomes accessible via sysfs */
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
@@ -304,11 +310,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_sysfs;
- spin_lock(&ubi->volumes_lock);
- ubi->volumes[vol_id] = vol;
- ubi->vol_count += 1;
- spin_unlock(&ubi->volumes_lock);
-
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
@@ -328,6 +329,10 @@ out_sysfs:
out_cdev:
cdev_del(&vol->cdev);
out_mapping:
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = NULL;
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
if (do_free)
ubi_eba_destroy_table(eba_tbl);
out_acc:
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 551f0f8dead3..91d8a48e53c3 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -450,7 +450,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;
- if (!client_info->slave)
+ if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
return;
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
@@ -944,6 +944,10 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
+ netdev_dbg(slave->bond->dev,
+ "Send learning packet: dev %s mac %pM vlan %d\n",
+ slave->dev->name, mac_addr, vid);
+
if (vid)
__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
@@ -966,14 +970,13 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
*/
rcu_read_lock();
netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
- if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
- if (strict_match &&
- ether_addr_equal_64bits(mac_addr,
- upper->dev_addr)) {
+ if (is_vlan_dev(upper) &&
+ bond->nest_level == vlan_get_encap_level(upper) - 1) {
+ if (upper->addr_assign_type == NET_ADDR_STOLEN) {
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_proto(upper),
vlan_dev_vlan_id(upper));
- } else if (!strict_match) {
+ } else {
alb_send_lp_vid(slave, upper->dev_addr,
vlan_dev_vlan_proto(upper),
vlan_dev_vlan_id(upper));
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 63d61c084815..1a139d0f2232 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -371,9 +371,10 @@ down:
/* Get link speed and duplex from the slave's base driver
* using ethtool. If for some reason the call fails or the
* values are invalid, set speed and duplex to -1,
- * and return.
+ * and return. Return 1 if speed or duplex settings are
+ * UNKNOWN; 0 otherwise.
*/
-static void bond_update_speed_duplex(struct slave *slave)
+static int bond_update_speed_duplex(struct slave *slave)
{
struct net_device *slave_dev = slave->dev;
struct ethtool_link_ksettings ecmd;
@@ -383,24 +384,27 @@ static void bond_update_speed_duplex(struct slave *slave)
slave->duplex = DUPLEX_UNKNOWN;
res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
- if (res < 0)
- return;
-
- if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
- return;
-
+ if (res < 0) {
+ slave->link = BOND_LINK_DOWN;
+ return 1;
+ }
+ if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1)) {
+ slave->link = BOND_LINK_DOWN;
+ return 1;
+ }
switch (ecmd.base.duplex) {
case DUPLEX_FULL:
case DUPLEX_HALF:
break;
default:
- return;
+ slave->link = BOND_LINK_DOWN;
+ return 1;
}
slave->speed = ecmd.base.speed;
slave->duplex = ecmd.base.duplex;
- return;
+ return 0;
}
const char *bond_slave_link_status(s8 link)
@@ -1520,39 +1524,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_close;
}
- /* If the mode uses primary, then the following is handled by
- * bond_change_active_slave().
- */
- if (!bond_uses_primary(bond)) {
- /* set promiscuity level to new slave */
- if (bond_dev->flags & IFF_PROMISC) {
- res = dev_set_promiscuity(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- /* set allmulti level to new slave */
- if (bond_dev->flags & IFF_ALLMULTI) {
- res = dev_set_allmulti(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- netif_addr_lock_bh(bond_dev);
-
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
-
- netif_addr_unlock_bh(bond_dev);
- }
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_add(slave_dev, lacpdu_multicast);
- }
-
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
if (res) {
netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
@@ -1683,8 +1654,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
} /* switch(bond_mode) */
#ifdef CONFIG_NET_POLL_CONTROLLER
- slave_dev->npinfo = bond->dev->npinfo;
- if (slave_dev->npinfo) {
+ if (bond->dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
@@ -1715,6 +1685,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_upper_unlink;
}
+ /* If the mode uses primary, then the following is handled by
+ * bond_change_active_slave().
+ */
+ if (!bond_uses_primary(bond)) {
+ /* set promiscuity level to new slave */
+ if (bond_dev->flags & IFF_PROMISC) {
+ res = dev_set_promiscuity(slave_dev, 1);
+ if (res)
+ goto err_sysfs_del;
+ }
+
+ /* set allmulti level to new slave */
+ if (bond_dev->flags & IFF_ALLMULTI) {
+ res = dev_set_allmulti(slave_dev, 1);
+ if (res) {
+ if (bond_dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(slave_dev, -1);
+ goto err_sysfs_del;
+ }
+ }
+
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ /* add lacpdu mc addr to mc list */
+ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+
+ dev_mc_add(slave_dev, lacpdu_multicast);
+ }
+ }
+
bond->slave_cnt++;
bond_compute_features(bond);
bond_set_carrier(bond);
@@ -1728,6 +1732,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond_mode_uses_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
+ bond->nest_level = dev_get_nest_level(bond_dev);
+
netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
slave_dev->name,
bond_is_active_slave(new_slave) ? "an active" : "a backup",
@@ -1738,6 +1744,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;
/* Undo stages on error */
+err_sysfs_del:
+ bond_sysfs_slave_del(new_slave);
+
err_upper_unlink:
bond_upper_dev_unlink(bond, new_slave);
@@ -1745,9 +1754,6 @@ err_unregister:
netdev_rx_handler_unregister(slave_dev);
err_detach:
- if (!bond_uses_primary(bond))
- bond_hw_addr_flush(bond_dev, slave_dev);
-
vlan_vids_del_by_dev(slave_dev, bond_dev);
if (rcu_access_pointer(bond->primary_slave) == new_slave)
RCU_INIT_POINTER(bond->primary_slave, NULL);
@@ -2063,6 +2069,7 @@ static int bond_miimon_inspect(struct bonding *bond)
(bond->params.downdelay - slave->delay) *
bond->params.miimon,
slave->dev->name);
+ commit++;
continue;
}
@@ -2100,7 +2107,7 @@ static int bond_miimon_inspect(struct bonding *bond)
(bond->params.updelay - slave->delay) *
bond->params.miimon,
slave->dev->name);
-
+ commit++;
continue;
}
@@ -2600,11 +2607,13 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
+ slave->new_link = BOND_LINK_NOCHANGE;
+
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
- slave->link = BOND_LINK_UP;
+ slave->new_link = BOND_LINK_UP;
slave_state_changed = 1;
/* primary_slave has no meaning in round-robin
@@ -2631,7 +2640,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, slave->last_rx, 2)) {
- slave->link = BOND_LINK_DOWN;
+ slave->new_link = BOND_LINK_DOWN;
slave_state_changed = 1;
if (slave->link_failure_count < UINT_MAX)
@@ -2662,6 +2671,11 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (!rtnl_trylock())
goto re_arm;
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->new_link != BOND_LINK_NOCHANGE)
+ slave->link = slave->new_link;
+ }
+
if (slave_state_changed) {
bond_slave_state_change(bond);
if (BOND_MODE(bond) == BOND_MODE_XOR)
@@ -3327,12 +3341,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
u64 nv = new[i];
u64 ov = old[i];
+ s64 delta = nv - ov;
/* detects if this particular field is 32bit only */
if (((nv | ov) >> 32) == 0)
- res[i] += (u32)nv - (u32)ov;
- else
- res[i] += nv - ov;
+ delta = (s64)(s32)((u32)nv - (u32)ov);
+
+ /* filter anomalies, some drivers reset their stats
+ * at down/up events.
+ */
+ if (delta > 0)
+ res[i] += delta;
}
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 1e37313054f3..6da69af103e6 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
return 0;
}
-static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static void cc770_tx(struct net_device *dev, int mo)
{
struct cc770_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
- unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
u8 dlc, rtr;
u32 id;
int i;
- if (can_dropped_invalid_skb(dev, skb))
- return NETDEV_TX_OK;
-
- if ((cc770_read_reg(priv,
- msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
- netdev_err(dev, "TX register is still occupied!\n");
- return NETDEV_TX_BUSY;
- }
-
- netif_stop_queue(dev);
-
dlc = cf->can_dlc;
id = cf->can_id;
- if (cf->can_id & CAN_RTR_FLAG)
- rtr = 0;
- else
- rtr = MSGCFG_DIR;
+ rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
cc770_write_reg(priv, msgobj[mo].ctrl1,
RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
+
if (id & CAN_EFF_FLAG) {
id &= CAN_EFF_MASK;
cc770_write_reg(priv, msgobj[mo].config,
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < dlc; i++)
cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
- /* Store echo skb before starting the transfer */
- can_put_echo_skb(skb, dev, 0);
-
cc770_write_reg(priv, msgobj[mo].ctrl1,
- RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+ RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
+}
- stats->tx_bytes += dlc;
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
- /*
- * HM: We had some cases of repeated IRQs so make sure the
- * INT is acknowledged I know it's already further up, but
- * doing again fixed the issue
- */
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+ netif_stop_queue(dev);
+
+ if ((cc770_read_reg(priv,
+ msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+ netdev_err(dev, "TX register is still occupied!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ priv->tx_skb = skb;
+ cc770_tx(dev, mo);
return NETDEV_TX_OK;
}
@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
struct cc770_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
unsigned int mo = obj2msgobj(o);
+ struct can_frame *cf;
+ u8 ctrl1;
+
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
- /* Nothing more to send, switch off interrupts */
cc770_write_reg(priv, msgobj[mo].ctrl0,
MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
- /*
- * We had some cases of repeated IRQ so make sure the
- * INT is acknowledged
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
+
+ if (unlikely(!priv->tx_skb)) {
+ netdev_err(dev, "missing tx skb in tx interrupt\n");
+ return;
+ }
+
+ if (unlikely(ctrl1 & MSGLST_SET)) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+
+ /* When the CC770 is sending an RTR message and it receives a regular
+ * message that matches the id of the RTR message, it will overwrite the
+ * outgoing message in the TX register. When this happens we must
+ * process the received message and try to transmit the outgoing skb
+ * again.
*/
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+ if (unlikely(ctrl1 & NEWDAT_SET)) {
+ cc770_rx(dev, mo, ctrl1);
+ cc770_tx(dev, mo);
+ return;
+ }
+ cf = (struct can_frame *)priv->tx_skb->data;
+ stats->tx_bytes += cf->can_dlc;
stats->tx_packets++;
+
+ can_put_echo_skb(priv->tx_skb, dev, 0);
can_get_echo_skb(dev, 0);
+ priv->tx_skb = NULL;
+
netif_wake_queue(dev);
}
@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
priv->can.do_set_bittiming = cc770_set_bittiming;
priv->can.do_set_mode = cc770_set_mode;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->tx_skb = NULL;
memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index a1739db98d91..95752e1d1283 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -193,6 +193,8 @@ struct cc770_priv {
u8 cpu_interface; /* CPU interface register */
u8 clkout; /* Clock out register */
u8 bus_config; /* Bus conffiguration register */
+
+ struct sk_buff *tx_skb;
};
struct net_device *alloc_cc770dev(int sizeof_priv);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 16f7cadda5c3..47f43bdecd51 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -493,7 +493,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
data = be32_to_cpup((__be32 *)&cf->data[0]);
flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[0]);
}
- if (cf->can_dlc > 3) {
+ if (cf->can_dlc > 4) {
data = be32_to_cpup((__be32 *)&cf->data[4]);
flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[1]);
}
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index c06ef438f23f..6c676403f823 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -30,6 +30,7 @@
#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2)
#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3)
#define IFI_CANFD_STCMD_BUSOFF BIT(4)
+#define IFI_CANFD_STCMD_ERROR_WARNING BIT(5)
#define IFI_CANFD_STCMD_BUSMONITOR BIT(16)
#define IFI_CANFD_STCMD_LOOPBACK BIT(18)
#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24)
@@ -52,7 +53,10 @@
#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13)
#define IFI_CANFD_INTERRUPT 0xc
+#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0)
#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1)
+#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2)
+#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3)
#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10)
#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16)
#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22)
@@ -61,6 +65,10 @@
#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31))
#define IFI_CANFD_IRQMASK 0x10
+#define IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0)
+#define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1)
+#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2)
+#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3)
#define IFI_CANFD_IRQMASK_SET_ERR BIT(7)
#define IFI_CANFD_IRQMASK_SET_TS BIT(15)
#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16)
@@ -136,6 +144,8 @@
#define IFI_CANFD_SYSCLOCK 0x50
#define IFI_CANFD_VER 0x54
+#define IFI_CANFD_VER_REV_MASK 0xff
+#define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15
#define IFI_CANFD_IP_ID 0x58
#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD
@@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
if (enable) {
enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
- IFI_CANFD_IRQMASK_RXFIFO_NEMPTY;
+ IFI_CANFD_IRQMASK_RXFIFO_NEMPTY |
+ IFI_CANFD_IRQMASK_ERROR_STATE_CHG |
+ IFI_CANFD_IRQMASK_ERROR_WARNING |
+ IFI_CANFD_IRQMASK_ERROR_BUSOFF;
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
}
@@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
return 1;
}
-static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr)
+static int ifi_canfd_handle_lec_err(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
+ u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
@@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
switch (new_state) {
case CAN_STATE_ERROR_ACTIVE:
+ /* error active state */
+ priv->can.can_stats.error_warning++;
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ break;
+ case CAN_STATE_ERROR_WARNING:
/* error warning state */
priv->can.can_stats.error_warning++;
priv->can.state = CAN_STATE_ERROR_WARNING;
@@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
ifi_canfd_get_berr_counter(ndev, &bec);
switch (new_state) {
- case CAN_STATE_ERROR_ACTIVE:
+ case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
return 1;
}
-static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd)
+static int ifi_canfd_handle_state_errors(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
int work_done = 0;
- u32 isr;
- /*
- * The ErrWarn condition is a little special, since the bit is
- * located in the INTERRUPT register instead of STCMD register.
- */
- isr = readl(priv->base + IFI_CANFD_INTERRUPT);
- if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) &&
+ if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) &&
+ (priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
+ netdev_dbg(ndev, "Error, entered active state\n");
+ work_done += ifi_canfd_handle_state_change(ndev,
+ CAN_STATE_ERROR_ACTIVE);
+ }
+
+ if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&
(priv->can.state != CAN_STATE_ERROR_WARNING)) {
- /* Clear the interrupt */
- writel(IFI_CANFD_INTERRUPT_ERROR_WARNING,
- priv->base + IFI_CANFD_INTERRUPT);
netdev_dbg(ndev, "Error, entered warning state\n");
work_done += ifi_canfd_handle_state_change(ndev,
CAN_STATE_ERROR_WARNING);
@@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
{
struct net_device *ndev = napi->dev;
struct ifi_canfd_priv *priv = netdev_priv(ndev);
- const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE |
- IFI_CANFD_STCMD_BUSOFF;
- int work_done = 0;
-
- u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
- u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
+ int work_done = 0;
/* Handle bus state changes */
- if ((stcmd & stcmd_state_mask) ||
- ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0))
- work_done += ifi_canfd_handle_state_errors(ndev, stcmd);
+ work_done += ifi_canfd_handle_state_errors(ndev);
/* Handle lost messages on RX */
if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
@@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
/* Handle lec errors on the bus */
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
- work_done += ifi_canfd_handle_lec_err(ndev, errctr);
+ work_done += ifi_canfd_handle_lec_err(ndev);
/* Handle normal messages on RX */
if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
@@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
struct net_device_stats *stats = &ndev->stats;
const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
+ IFI_CANFD_INTERRUPT_ERROR_COUNTER |
+ IFI_CANFD_INTERRUPT_ERROR_STATE_CHG |
IFI_CANFD_INTERRUPT_ERROR_WARNING |
- IFI_CANFD_INTERRUPT_ERROR_COUNTER;
+ IFI_CANFD_INTERRUPT_ERROR_BUSOFF;
const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
- const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ |
- IFI_CANFD_INTERRUPT_ERROR_WARNING));
+ const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ);
u32 isr;
isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *addr;
int irq, ret;
- u32 id;
+ u32 id, rev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(dev, res);
@@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
return -EINVAL;
}
+ rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK;
+ if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) {
+ dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n",
+ rev, IFI_CANFD_VER_REV_MIN_SUPPORTED);
+ return -EINVAL;
+ }
+
ndev = alloc_candev(sizeof(*priv), 1);
if (!ndev)
return -ENOMEM;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index c9d61a6dfb7a..3a75352f632b 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1179,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
skb = alloc_can_skb(priv->netdev, &cf);
if (!skb) {
- stats->tx_dropped++;
+ stats->rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index e2512ab41168..e13c9cd45dc0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -61,6 +61,8 @@
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -232,11 +234,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
tail_masked = admin_queue->sq.tail & queue_size_mask;
/* In case of queue FULL */
- cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ cnt = atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
- admin_queue->sq.tail, admin_queue->sq.head,
- admin_queue->q_depth);
+ pr_debug("admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
- unsigned long flags;
- u32 start_time;
+ unsigned long flags, timeout;
int ret;
- start_time = ((u32)jiffies_to_usecs(jiffies));
+ timeout = jiffies + ADMIN_CMD_TIMEOUT_US;
+
+ while (1) {
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ if (comp_ctx->status != ENA_CMD_SUBMITTED)
+ break;
- while (comp_ctx->status == ENA_CMD_SUBMITTED) {
- if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
- ADMIN_CMD_TIMEOUT_US) {
+ if (time_is_before_jiffies(timeout)) {
pr_err("Wait for completion (polling) timeout\n");
/* ENA didn't have any completion */
spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- spin_lock_irqsave(&admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
- spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-
msleep(100);
}
@@ -1449,6 +1450,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
+ u32 mask_value = 0;
+
+ if (polling)
+ mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+ writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
ena_dev->admin_queue.polling = polling;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index bfeaec5bd7b9..0d9ce08ee3a9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1542,6 +1542,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
qid, rc);
ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
}
ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1606,6 +1607,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
qid, rc);
ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
}
ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
@@ -2806,6 +2808,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
int release_bars;
+ if (ena_dev->mem_bar)
+ devm_iounmap(&pdev->dev, ena_dev->mem_bar);
+
+ devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+
release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
pci_release_selected_regions(pdev, release_bars);
}
@@ -2893,8 +2900,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_ena_dev;
}
- ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR),
- pci_resource_len(pdev, ENA_REG_BAR));
+ ena_dev->reg_bar = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, ENA_REG_BAR),
+ pci_resource_len(pdev, ENA_REG_BAR));
if (!ena_dev->reg_bar) {
dev_err(&pdev->dev, "failed to remap regs bar\n");
rc = -EFAULT;
@@ -2914,8 +2922,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR),
- pci_resource_len(pdev, ENA_MEM_BAR));
+ ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, ENA_MEM_BAR),
+ pci_resource_len(pdev, ENA_MEM_BAR));
if (!ena_dev->mem_bar) {
rc = -EFAULT;
goto err_device_destroy;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 5390ae89136c..71611bd6384b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -560,6 +560,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
cb |= CFG_CLE_BYPASS_EN0;
CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+ CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 06e598c8bc16..c82faf1a88b8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -163,6 +163,7 @@ enum xgene_enet_rm {
#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
+#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 8158d4698734..fca2e428cd86 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -505,14 +505,24 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void xgene_enet_skip_csum(struct sk_buff *skb)
+static void xgene_enet_rx_csum(struct sk_buff *skb)
{
+ struct net_device *ndev = skb->dev;
struct iphdr *iph = ip_hdr(skb);
- if (!ip_is_fragment(iph) ||
- (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
+ if (!(ndev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+
+ if (ip_is_fragment(iph))
+ return;
+
+ if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+ return;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
@@ -537,9 +547,9 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
buf_pool->rx_skb[skb_index] = NULL;
/* checking for error */
- status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
+ status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
- if (unlikely(status > 2)) {
+ if (unlikely(status)) {
dev_kfree_skb_any(skb);
xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
status);
@@ -555,10 +565,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, ndev);
- if (likely((ndev->features & NETIF_F_IP_CSUM) &&
- skb->protocol == htons(ETH_P_IP))) {
- xgene_enet_skip_csum(skb);
- }
+ xgene_enet_rx_csum(skb);
rx_ring->rx_packets++;
rx_ring->rx_bytes += datalen;
@@ -1673,6 +1680,30 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
}
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+ { "APMC0D05", XGENE_ENET1},
+ { "APMC0D30", XGENE_ENET1},
+ { "APMC0D31", XGENE_ENET1},
+ { "APMC0D3F", XGENE_ENET1},
+ { "APMC0D26", XGENE_ENET2},
+ { "APMC0D25", XGENE_ENET2},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
+static const struct of_device_id xgene_enet_of_match[] = {
+ {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+ {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
+
static int xgene_enet_probe(struct platform_device *pdev)
{
struct net_device *ndev;
@@ -1725,7 +1756,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
xgene_enet_setup_ops(pdata);
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
- ndev->features |= NETIF_F_TSO;
+ ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
spin_lock_init(&pdata->mss_lock);
}
ndev->hw_features = ndev->features;
@@ -1819,32 +1850,6 @@ static void xgene_enet_shutdown(struct platform_device *pdev)
xgene_enet_remove(pdev);
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_enet_acpi_match[] = {
- { "APMC0D05", XGENE_ENET1},
- { "APMC0D30", XGENE_ENET1},
- { "APMC0D31", XGENE_ENET1},
- { "APMC0D3F", XGENE_ENET1},
- { "APMC0D26", XGENE_ENET2},
- { "APMC0D25", XGENE_ENET2},
- { }
-};
-MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id xgene_enet_of_match[] = {
- {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
- {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
- {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
- {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
- {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
- {},
-};
-
-MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
-#endif
-
static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index be865b4dada2..aba4853e6b70 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -210,39 +210,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
continue;
}
- pktlen = info & LEN_MASK;
- stats->rx_packets++;
- stats->rx_bytes += pktlen;
- skb = rx_buff->skb;
- skb_put(skb, pktlen);
- skb->dev = ndev;
- skb->protocol = eth_type_trans(skb, ndev);
-
- dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
- dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
- /* Prepare the BD for next cycle */
- rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
- EMAC_BUFFER_SIZE);
- if (unlikely(!rx_buff->skb)) {
+ /* Prepare the BD for next cycle. netif_receive_skb()
+ * only if new skb was allocated and mapped to avoid holes
+ * in the RX fifo.
+ */
+ skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "cannot allocate skb\n");
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
- /* Because receive_skb is below, increment rx_dropped */
stats->rx_dropped++;
continue;
}
- /* receive_skb only if new skb was allocated to avoid holes */
- netif_receive_skb(skb);
-
- addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+ addr = dma_map_single(&ndev->dev, (void *)skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, addr)) {
if (net_ratelimit())
- netdev_err(ndev, "cannot dma map\n");
- dev_kfree_skb(rx_buff->skb);
+ netdev_err(ndev, "cannot map dma buffer\n");
+ dev_kfree_skb(skb);
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
+ stats->rx_dropped++;
continue;
}
+
+ /* unmap previosly mapped skb */
+ dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+ dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+ pktlen = info & LEN_MASK;
+ stats->rx_packets++;
+ stats->rx_bytes += pktlen;
+ skb_put(rx_buff->skb, pktlen);
+ rx_buff->skb->dev = ndev;
+ rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+ netif_receive_skb(rx_buff->skb);
+
+ rx_buff->skb = skb;
dma_unmap_addr_set(rx_buff, addr, addr);
dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index c6163874e4e7..c770ca37c9b2 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
/* Optional regulator for PHY */
priv->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(priv->regulator)) {
- if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto out_clk_disable;
+ }
dev_err(dev, "no regulator found\n");
priv->regulator = NULL;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 744ed6ddaf37..91fbba58d033 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -707,37 +707,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
struct net_device *ndev = priv->netdev;
- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int txbds_processed = 0;
struct bcm_sysport_cb *cb;
+ unsigned int txbds_ready;
+ unsigned int c_index;
u32 hw_ind;
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
- last_c_index = ring->c_index;
- num_tx_cbs = ring->size;
-
- c_index &= (num_tx_cbs - 1);
-
- if (c_index >= last_c_index)
- last_tx_cn = c_index - last_c_index;
- else
- last_tx_cn = num_tx_cbs - last_c_index + c_index;
+ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
netif_dbg(priv, tx_done, ndev,
- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
- ring->index, c_index, last_tx_cn, last_c_index);
+ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+ ring->index, ring->c_index, c_index, txbds_ready);
- while (last_tx_cn-- > 0) {
- cb = ring->cbs + last_c_index;
+ while (txbds_processed < txbds_ready) {
+ cb = &ring->cbs[ring->clean_index];
bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
ring->desc_count++;
- last_c_index++;
- last_c_index &= (num_tx_cbs - 1);
+ txbds_processed++;
+
+ if (likely(ring->clean_index < ring->size - 1))
+ ring->clean_index++;
+ else
+ ring->clean_index = 0;
}
ring->c_index = c_index;
@@ -1207,6 +1203,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
ring->index = index;
ring->size = size;
+ ring->clean_index = 0;
ring->alloc_size = ring->size;
ring->desc_cpu = p;
ring->desc_count = ring->size;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 1c82e3da69a7..07b0aaa98de0 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -638,7 +638,7 @@ struct bcm_sysport_tx_ring {
unsigned int desc_count; /* Number of descriptors */
unsigned int curr_desc; /* Current descriptor */
unsigned int c_index; /* Last consumer index */
- unsigned int p_index; /* Current producer index */
+ unsigned int clean_index; /* Current clean index */
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index c16ec3a51876..1ee11b600645 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
+#include <linux/of_net.h>
#include "bgmac.h"
static inline bool bgmac_is_bcm4707_family(struct bcma_device *core)
@@ -96,7 +97,7 @@ static int bgmac_probe(struct bcma_device *core)
struct ssb_sprom *sprom = &core->bus->sprom;
struct mii_bus *mii_bus;
struct bgmac *bgmac;
- u8 *mac;
+ const u8 *mac = NULL;
int err;
bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL);
@@ -110,21 +111,27 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
- switch (core->core_unit) {
- case 0:
- mac = sprom->et0mac;
- break;
- case 1:
- mac = sprom->et1mac;
- break;
- case 2:
- mac = sprom->et2mac;
- break;
- default:
- dev_err(bgmac->dev, "Unsupported core_unit %d\n",
- core->core_unit);
- err = -ENOTSUPP;
- goto err;
+ if (bgmac->dev->of_node)
+ mac = of_get_mac_address(bgmac->dev->of_node);
+
+ /* If no MAC address assigned via device tree, check SPROM */
+ if (!mac) {
+ switch (core->core_unit) {
+ case 0:
+ mac = sprom->et0mac;
+ break;
+ case 1:
+ mac = sprom->et1mac;
+ break;
+ case 2:
+ mac = sprom->et2mac;
+ break;
+ default:
+ dev_err(bgmac->dev, "Unsupported core_unit %d\n",
+ core->core_unit);
+ err = -ENOTSUPP;
+ goto err;
+ }
}
ether_addr_copy(bgmac->mac_addr, mac);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0a5ee1d973ac..31287cec6e3a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2026,6 +2026,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
ETH_OVREHEAD +
mtu +
BNX2X_FW_RX_ALIGN_END;
+ fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
@@ -3034,7 +3035,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
del_timer_sync(&bp->timer);
- if (IS_PF(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* Set ALWAYS_ALIVE bit in shmem */
bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
bnx2x_drv_pulse(bp);
@@ -3120,7 +3121,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->cnic_loaded = false;
/* Clear driver version indication in shmem */
- if (IS_PF(bp))
+ if (IS_PF(bp) && !BP_NOMCP(bp))
bnx2x_update_mng_version(bp);
/* Check if there are pending parity attentions. If there are - set
@@ -3886,15 +3887,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
+ u16 vlan_tci = 0;
#ifndef BNX2X_STOP_ON_ERROR
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
#endif
- tx_start_bd->vlan_or_ethertype =
- cpu_to_le16(ntohs(eth->h_proto));
+ /* Still need to consider inband vlan for enforced */
+ if (__vlan_get_tag(skb, &vlan_tci)) {
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(ntohs(eth->h_proto));
+ } else {
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_INBAND_VLAN <<
+ ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(vlan_tci);
+ }
#ifndef BNX2X_STOP_ON_ERROR
- else
+ } else {
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ }
#endif
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5d958b5bb8b1..554c4086b3c6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
do {
bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+
+ /* If we read all 0xFFs, means we are in PCI error state and
+ * should bail out to avoid crashes on adapter's FW reads.
+ */
+ if (bp->common.shmem_base == 0xFFFFFFFF) {
+ bp->flags |= NO_MCP_FLAG;
+ return -ENODEV;
+ }
+
if (bp->common.shmem_base) {
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if (val & SHR_MEM_VALIDITY_MB)
@@ -14312,7 +14321,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
BNX2X_ERR("IO slot reset --> driver unload\n");
/* MCP should have been reset; Need to wait for validity */
- bnx2x_init_shmem(bp);
+ if (bnx2x_init_shmem(bp)) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 v;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index bbb3641eddcb..3aa993bbafd9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1498,12 +1498,16 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (BNXT_VF(bp))
goto async_event_process_exit;
- if (data1 & 0x20000) {
+
+ /* print unsupported speed warning in forced speed mode only */
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
+ (data1 & 0x20000)) {
u16 fw_speed = link_info->force_link_speed;
u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
- netdev_warn(bp->dev, "Link speed %d no longer supported\n",
- speed);
+ if (speed != SPEED_UNKNOWN)
+ netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+ speed);
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
/* fall thru */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 60e2af8678bd..393cce3bf2fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -68,7 +68,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
return -EINVAL;
}
- if (vf_id >= bp->pf.max_vfs) {
+ if (vf_id >= bp->pf.active_vfs) {
netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bb22d325e965..4ffbe850d746 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8720,14 +8720,15 @@ static void tg3_free_consistent(struct tg3 *tp)
tg3_mem_rx_release(tp);
tg3_mem_tx_release(tp);
- /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
- tg3_full_lock(tp, 0);
+ /* tp->hw_stats can be referenced safely:
+ * 1. under rtnl_lock
+ * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+ */
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
- tg3_full_unlock(tp);
}
/*
@@ -10049,6 +10050,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
tw32(GRC_MODE, tp->grc_mode | val);
+ /* On one of the AMD platform, MRRS is restricted to 4000 because of
+ * south bridge limitation. As a workaround, Driver is setting MRRS
+ * to 2048 instead of default 4096.
+ */
+ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
+ val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
+ tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
+ }
+
/* Setup the timer prescalar register. Clock is always 66Mhz. */
val = tr32(GRC_MISC_CFG);
val &= ~0xff;
@@ -14151,7 +14162,7 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
struct tg3 *tp = netdev_priv(dev);
spin_lock_bh(&tp->lock);
- if (!tp->hw_stats) {
+ if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
*stats = tp->net_stats_prev;
spin_unlock_bh(&tp->lock);
return stats;
@@ -14228,7 +14239,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
*/
if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
tg3_asic_rev(tp) == ASIC_REV_5717 ||
- tg3_asic_rev(tp) == ASIC_REV_5719)
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 3b5e98ecba00..6e51b793615c 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -95,6 +95,7 @@
#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
+#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0
#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
@@ -280,6 +281,9 @@
#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
/* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DEV_STATUS_CTRL 0x000000b4
+#define MAX_READ_REQ_SIZE_2048 0x00004000
+#define MAX_READ_REQ_MASK 0x00007000
#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
#define DUAL_MAC_CTRL_CH_MASK 0x00000003
#define DUAL_MAC_CTRL_ID 0x00000004
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0f6811860ad5..a36e38676640 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2845,7 +2845,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
- memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
static void
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8a37012c9c89..c75d4ea9342b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1576,6 +1576,11 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->pdev = pdev;
nic->pnicvf = nic;
nic->max_queues = qcount;
+ /* If no of CPUs are too low, there won't be any queues left
+ * for XDP_TX, hence double it.
+ */
+ if (!nic->t88)
+ nic->max_queues *= 2;
/* MAP VF's configuration registers */
nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0c2a32a305bc..3ec32d7c5866 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2742,6 +2742,16 @@ static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return -EOPNOTSUPP;
}
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ /* Disable GRO, if RX_CSUM is disabled */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_GRO;
+
+ return features;
+}
+
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
@@ -2766,6 +2776,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
+ .ndo_fix_features = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 9e073fb6870a..ebeeb3581b9c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2596,7 +2596,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
}
#define EEPROM_STAT_ADDR 0x7bfc
-#define VPD_SIZE 0x800
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
@@ -2634,15 +2633,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
- /* We have two VPD data structures stored in the adapter VPD area.
- * By default, Linux calculates the size of the VPD area by traversing
- * the first VPD area at offset 0x0, so we need to tell the OS what
- * our real VPD size is.
- */
- ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
- if (ret < 0)
- goto out;
-
/* Card information normally starts at VPD_BASE but early cards had
* it at 0.
*/
@@ -6195,13 +6185,18 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
if (!t4_fw_matches_chip(adap, fw_hdr))
return -EINVAL;
+ /* Disable FW_OK flag so that mbox commands with FW_OK flag set
+ * wont be sent when we are flashing FW.
+ */
+ adap->flags &= ~FW_OK;
+
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
- return ret;
+ goto out;
ret = t4_load_fw(adap, fw_data, size);
if (ret < 0)
- return ret;
+ goto out;
/*
* Older versions of the firmware don't understand the new
@@ -6212,7 +6207,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
* its header flags to see if it advertises the capability.
*/
reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
- return t4_fw_restart(adap, mbox, reset);
+ ret = t4_fw_restart(adap, mbox, reset);
+
+ /* Grab potentially new Firmware Device Log parameters so we can see
+ * how healthy the new Firmware is. It's okay to contact the new
+ * Firmware for these parameters even though, as far as it's
+ * concerned, we've never said "HELLO" to it ...
+ */
+ (void)t4_init_devlog_params(adap);
+out:
+ adap->flags |= FW_OK;
+ return ret;
}
/**
@@ -8083,7 +8088,16 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
if (ret)
break;
- idx = (idx + 1) & UPDBGLARDPTR_M;
+
+ /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
+ * identify the 32-bit portion of the full 312-bit data
+ */
+ if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
+ idx = (idx & 0xff0) + 0x10;
+ else
+ idx++;
+ /* address can't exceed 0xfff */
+ idx &= UPDBGLARDPTR_M;
}
restart:
if (cfg & UPDBGLAEN_F) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f3ed9ce99e5e..9d64e8e7c417 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2616,8 +2616,8 @@ void t4vf_sge_stop(struct adapter *adapter)
int t4vf_sge_init(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
- u32 fl0 = sge_params->sge_fl_buffer_size[0];
- u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+ u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
/*
@@ -2625,9 +2625,20 @@ int t4vf_sge_init(struct adapter *adapter)
* the Physical Function Driver. Ideally we should be able to deal
* with _any_ configuration. Practice is different ...
*/
- if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
- fl0, fl1);
+ fl_small_pg, fl_large_pg);
return -EINVAL;
}
if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
@@ -2639,8 +2650,8 @@ int t4vf_sge_init(struct adapter *adapter)
/*
* Now translate the adapter parameters into our internal forms.
*/
- if (fl1)
- s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
s->pktshift = PKTSHIFT_G(sge_params->sge_control);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 262587240c86..0437149f5939 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -28,6 +28,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <net/ip.h>
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 917091871259..fe00f71bc6b4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2371,6 +2371,10 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}
+
+static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+}
#endif /* !defined(CONFIG_M5272) */
static int fec_enet_nway_reset(struct net_device *dev)
@@ -3209,7 +3213,7 @@ static int fec_enet_init(struct net_device *ndev)
}
#ifdef CONFIG_OF
-static void fec_reset_phy(struct platform_device *pdev)
+static int fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
bool active_high = false;
@@ -3217,7 +3221,7 @@ static void fec_reset_phy(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
if (!np)
- return;
+ return 0;
of_property_read_u32(np, "phy-reset-duration", &msec);
/* A sane reset duration should not be longer than 1s */
@@ -3225,8 +3229,10 @@ static void fec_reset_phy(struct platform_device *pdev)
msec = 1;
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- if (!gpio_is_valid(phy_reset))
- return;
+ if (phy_reset == -EPROBE_DEFER)
+ return phy_reset;
+ else if (!gpio_is_valid(phy_reset))
+ return 0;
active_high = of_property_read_bool(np, "phy-reset-active-high");
@@ -3235,7 +3241,7 @@ static void fec_reset_phy(struct platform_device *pdev)
"phy-reset");
if (err) {
dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
- return;
+ return err;
}
if (msec > 20)
@@ -3244,14 +3250,17 @@ static void fec_reset_phy(struct platform_device *pdev)
usleep_range(msec * 1000, msec * 1000 + 1000);
gpio_set_value_cansleep(phy_reset, !active_high);
+
+ return 0;
}
#else /* CONFIG_OF */
-static void fec_reset_phy(struct platform_device *pdev)
+static int fec_reset_phy(struct platform_device *pdev)
{
/*
* In case of platform probe, the reset has been done
* by machine code.
*/
+ return 0;
}
#endif /* CONFIG_OF */
@@ -3422,6 +3431,7 @@ fec_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to enable phy regulator: %d\n", ret);
+ clk_disable_unprepare(fep->clk_ipg);
goto failed_regulator;
}
} else {
@@ -3434,7 +3444,9 @@ fec_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- fec_reset_phy(pdev);
+ ret = fec_reset_phy(pdev);
+ if (ret)
+ goto failed_reset;
if (fep->bufdesc_ex)
fec_ptp_init(pdev);
@@ -3495,8 +3507,10 @@ failed_init:
fec_ptp_stop(pdev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
+failed_reset:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
failed_regulator:
- clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
@@ -3523,6 +3537,8 @@ fec_drv_remove(struct platform_device *pdev)
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 446c7b374ff5..a10de1e9c157 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
const struct of_device_id *id =
of_match_device(fsl_pq_mdio_match, &pdev->dev);
- const struct fsl_pq_mdio_data *data = id->data;
+ const struct fsl_pq_mdio_data *data;
struct device_node *np = pdev->dev.of_node;
struct resource res;
struct device_node *tbi;
@@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
struct mii_bus *new_bus;
int err;
+ if (!id) {
+ dev_err(&pdev->dev, "Failed to match device\n");
+ return -ENODEV;
+ }
+
+ data = id->data;
+
dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
new_bus = mdiobus_alloc_size(sizeof(*priv));
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 57798814160d..ec4b6997f24a 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -314,11 +314,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
now = tmr_cnt_read(etsects);
now += delta;
tmr_cnt_write(etsects, now);
+ set_fipers(etsects);
spin_unlock_irqrestore(&etsects->lock, flags);
- set_fipers(etsects);
-
return 0;
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index f76d33279454..ef9bc26ebc1a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -2594,11 +2594,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
} else if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_MURAM) {
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
- (u32) immrbar_virt_to_phys(ugeth->
- p_tx_bd_ring[i]));
+ (u32)qe_muram_dma(ugeth->p_tx_bd_ring[i]));
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
last_bd_completed_address,
- (u32) immrbar_virt_to_phys(endOfRing));
+ (u32)qe_muram_dma(endOfRing));
}
}
@@ -2844,8 +2843,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
} else if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_MURAM) {
out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
- (u32) immrbar_virt_to_phys(ugeth->
- p_rx_bd_ring[i]));
+ (u32)qe_muram_dma(ugeth->p_rx_bd_ring[i]));
}
/* rest of fields handled by QE */
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 2d0cb609adc3..b7c8433a7a37 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -773,8 +773,9 @@ static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
/* update the current hash->queue mappings from the shadow RSS table */
- memcpy(indir, ppe_cb->rss_indir_table,
- HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+ if (indir)
+ memcpy(indir, ppe_cb->rss_indir_table,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
return 0;
}
@@ -785,15 +786,19 @@ static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
/* set the RSS Hash Key if specififed by the user */
- if (key)
- hns_ppe_set_rss_key(ppe_cb, (u32 *)key);
+ if (key) {
+ memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE);
+ hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
+ }
- /* update the shadow RSS table with user specified qids */
- memcpy(ppe_cb->rss_indir_table, indir,
- HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+ if (indir) {
+ /* update the shadow RSS table with user specified qids */
+ memcpy(ppe_cb->rss_indir_table, indir,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
- /* now update the hardware */
- hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+ /* now update the hardware */
+ hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 1e1eb92998fb..02a03bccde7b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -86,12 +86,11 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
}
-/**
-*hns_gmac_get_en - get port enable
-*@mac_drv:mac device
-*@rx:rx enable
-*@tx:tx enable
-*/
+/* hns_gmac_get_en - get port enable
+ * @mac_drv:mac device
+ * @rx:rx enable
+ * @tx:tx enable
+ */
static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index c494fc52be74..62a12991ce9a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -70,7 +70,7 @@ enum dsaf_roce_qos_sl {
};
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
-#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
+#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
enum hal_dsaf_mode {
HRD_DSAF_NO_DSAF_MODE = 0x0,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index f0ed80d6ef9c..f3be9ac47bfb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -430,7 +430,6 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
static int hns_rcb_get_port_in_comm(
struct rcb_common_cb *rcb_common, int ring_idx)
{
-
return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 8f4f0e8da984..d1c868c800f9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -776,7 +776,7 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data)
*/
static int hns_xgmac_get_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return ARRAY_SIZE(g_xgmac_stats_string);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index c06845b7b666..111e1aab7d83 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -299,9 +299,9 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
mtu);
}
-int hns_nic_net_xmit_hw(struct net_device *ndev,
- struct sk_buff *skb,
- struct hns_nic_ring_data *ring_data)
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_ring *ring = ring_data->ring;
@@ -360,6 +360,10 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
netdev_tx_sent_queue(dev_queue, skb->len);
+ netif_trans_update(ndev);
+ ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
@@ -511,7 +515,8 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
int last_offset;
bool twobufs;
- twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
+ twobufs = ((PAGE_SIZE < 8192) &&
+ hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
desc = &ring->desc[ring->next_to_clean];
size = le16_to_cpu(desc->rx.size);
@@ -1407,17 +1412,11 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- int ret;
assert(skb->queue_mapping < ndev->ae_handle->q_num);
- ret = hns_nic_net_xmit_hw(ndev, skb,
- &tx_ring_data(priv, skb->queue_mapping));
- if (ret == NETDEV_TX_OK) {
- netif_trans_update(ndev);
- ndev->stats.tx_bytes += skb->len;
- ndev->stats.tx_packets++;
- }
- return (netdev_tx_t)ret;
+
+ return hns_nic_net_xmit_hw(ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
}
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
@@ -1700,7 +1699,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
-
+ /* make sure to commit the things */
smp_mb__before_atomic();
clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 5b412de350aa..7bc6a6ecd666 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -91,8 +91,8 @@ void hns_ethtool_set_ops(struct net_device *ndev);
void hns_nic_net_reset(struct net_device *ndev);
void hns_nic_net_reinit(struct net_device *netdev);
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
-int hns_nic_net_xmit_hw(struct net_device *ndev,
- struct sk_buff *skb,
- struct hns_nic_ring_data *ring_data);
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data);
#endif /**__HNS_ENET_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 87d5c94b2810..6be0cae44e9b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1017,8 +1017,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
cnt--;
return cnt;
- } else {
+ } else if (stringset == ETH_SS_STATS) {
return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+ } else {
+ return -EOPNOTSUPP;
}
}
@@ -1252,12 +1254,10 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
ops = priv->ae_handle->dev->ops;
- /* currently hfunc can only be Toeplitz hash */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ netdev_err(netdev, "Invalid hfunc!\n");
return -EOPNOTSUPP;
- if (!indir)
- return 0;
+ }
return ops->set_rss(priv->ae_handle, indir, key, hfunc);
}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 8f139197f1aa..5977b695d0fa 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -342,6 +342,7 @@ static int emac_reset(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
int n = 20;
+ bool __maybe_unused try_internal_clock = false;
DBG(dev, "reset" NL);
@@ -354,6 +355,7 @@ static int emac_reset(struct emac_instance *dev)
}
#ifdef CONFIG_PPC_DCR_NATIVE
+do_retry:
/*
* PPC460EX/GT Embedded Processor Advanced User's Manual
* section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -361,10 +363,19 @@ static int emac_reset(struct emac_instance *dev)
* of the EMAC. If none is present, select the internal clock
* (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
* After a soft reset, select the external clock.
+ *
+ * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
+ * ethernet cable is not attached. This causes the reset to timeout
+ * and the PHY detection code in emac_init_phy() is unable to
+ * communicate and detect the AR8035-A PHY. As a result, the emac
+ * driver bails out early and the user has no ethernet.
+ * In order to stay compatible with existing configurations, the
+ * driver will temporarily switch to the internal clock, after
+ * the first reset fails.
*/
if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
- if (dev->phy_address == 0xffffffff &&
- dev->phy_map == 0xffffffff) {
+ if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff)) {
/* No PHY: select internal loop clock before reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -382,8 +393,15 @@ static int emac_reset(struct emac_instance *dev)
#ifdef CONFIG_PPC_DCR_NATIVE
if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
- if (dev->phy_address == 0xffffffff &&
- dev->phy_map == 0xffffffff) {
+ if (!n && !try_internal_clock) {
+ /* first attempt has timed out. */
+ n = 20;
+ try_internal_clock = true;
+ goto do_retry;
+ }
+
+ if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff)) {
/* No PHY: restore external clock source after reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
SDR0_ETH_CFG_ECS << dev->cell_index, 0);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7c6c1468628b..49094c965697 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -511,6 +511,23 @@ alloc_napi_failed:
return -ENOMEM;
}
+static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i])
+ disable_irq(adapter->tx_scrq[i]->irq);
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i])
+ disable_irq(adapter->rx_scrq[i]->irq);
+ }
+}
+
static int ibmvnic_close(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -519,6 +536,7 @@ static int ibmvnic_close(struct net_device *netdev)
int i;
adapter->closing = true;
+ disable_sub_crqs(adapter);
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index d7bdea79e9fa..8fd2458060a0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
- __E1000_DOWN
+ __E1000_DOWN,
+ __E1000_DISABLED
};
#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index f42129d09e2c..dd112aa5cebb 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -940,7 +940,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
- struct e1000_adapter *adapter;
+ struct e1000_adapter *adapter = NULL;
struct e1000_hw *hw;
static int cards_found;
@@ -950,6 +950,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
+ bool disable_dev = false;
/* do not allocate ioport bars when not needed */
need_ioport = e1000_is_need_ioport(pdev);
@@ -1250,11 +1251,13 @@ err_mdio_ioremap:
iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
- pci_disable_device(pdev);
+ if (!adapter || disable_dev)
+ pci_disable_device(pdev);
return err;
}
@@ -1272,6 +1275,7 @@ static void e1000_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ bool disable_dev;
e1000_down_and_stop(adapter);
e1000_release_manageability(adapter);
@@ -1290,9 +1294,11 @@ static void e1000_remove(struct pci_dev *pdev)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev, adapter->bars);
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
- pci_disable_device(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
}
/**
@@ -5166,7 +5172,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
return 0;
}
@@ -5210,6 +5217,10 @@ static int e1000_resume(struct pci_dev *pdev)
pr_err("Cannot enable PCI device from suspend\n");
return err;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5284,7 +5295,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
e1000_down(adapter);
- pci_disable_device(pdev);
+
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
/* Request a slot slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -5312,6 +5325,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 0feddf3393f9..825ec8f710e7 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1182,6 +1182,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
struct e1000_hw *hw = &adapter->hw;
if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+ struct sk_buff *skb = adapter->tx_hwtstamp_skb;
struct skb_shared_hwtstamps shhwtstamps;
u64 txstmp;
@@ -1190,9 +1191,14 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
- skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
- dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ /* Clear the global tx_hwtstamp_skb pointer and force writes
+ * prior to notifying the stack of a Tx timestamp.
+ */
adapter->tx_hwtstamp_skb = NULL;
+ wmb(); /* force write prior to skb_tstamp_tx */
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
} else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ adapter->tx_timeout_factor * HZ)) {
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
@@ -3528,6 +3534,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
switch (hw->mac.type) {
case e1000_pch2lan:
+ /* Stable 96MHz frequency */
+ incperiod = INCPERIOD_96MHz;
+ incvalue = INCVALUE_96MHz;
+ shift = INCVALUE_SHIFT_96MHz;
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ break;
case e1000_pch_lpt:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 96MHz frequency */
@@ -6639,12 +6651,17 @@ static int e1000e_pm_thaw(struct device *dev)
static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ int rc;
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
- return __e1000_shutdown(pdev, false);
+ rc = __e1000_shutdown(pdev, false);
+ if (rc)
+ e1000e_pm_thaw(dev);
+
+ return rc;
}
static int e1000e_pm_resume(struct device *dev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 5241e0873397..7041d83d48bf 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -942,7 +942,7 @@ static void fm10k_self_test(struct net_device *dev,
memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);
- if (FM10K_REMOVED(hw)) {
+ if (FM10K_REMOVED(hw->hw_addr)) {
netif_err(interface, drv, dev,
"Interface removed - test blocked\n");
eth_test->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 92bc8846f1ba..f4569461dcb8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1135,6 +1135,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
struct i40e_hw *hw = &np->vsi->back->hw;
u32 val;
+#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
+ if (hw->mac.type == I40E_MAC_X722) {
+ val = X722_EEPROM_SCOPE_LIMIT + 1;
+ return val;
+ }
val = (rd32(hw, I40E_GLPCI_LBARCTRL)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index becffd15c092..57c7456a5751 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -11142,10 +11142,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
round_jiffies(jiffies + pf->service_timer_period));
/* add this PF to client device list and launch a client service task */
- err = i40e_lan_add_device(pf);
- if (err)
- dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
- err);
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ err = i40e_lan_add_device(pf);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+ err);
+ }
#ifdef I40E_FCOE
/* create FCoE interface */
@@ -11323,10 +11325,11 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
/* remove attached clients */
- ret_code = i40e_lan_del_device(pf);
- if (ret_code) {
- dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
- ret_code);
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ ret_code = i40e_lan_del_device(pf);
+ if (ret_code)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ ret_code);
}
/* shutdown and destroy the HMC */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 954efe3118db..abe290bfc638 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
}
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index c5430394fac9..2e12ccf73dba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1820,6 +1820,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
+ skb = NULL;
continue;
}
@@ -2670,10 +2671,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
+ int align_pad = -(stale->page_offset) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -2681,7 +2702,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index c03800d1000a..7bfed441c466 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1262,6 +1262,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
+ skb = NULL;
continue;
}
@@ -1872,10 +1873,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
+ int align_pad = -(stale->page_offset) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -1883,7 +1904,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index ddf478d6322b..614f93e01500 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -154,6 +154,7 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index a7895c4cbcc3..9eb9b68f8935 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -721,6 +721,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
**/
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
+ struct sk_buff *skb = adapter->ptp_tx_skb;
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
@@ -748,10 +749,17 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust);
- skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(adapter->ptp_tx_skb);
+ /* Clear the lock early before calling skb_tstamp_tx so that
+ * applications are not woken up before the lock bit is clear. We use
+ * a copy of the skb pointer to ensure other threads can't change it
+ * while we're notifying the stack.
+ */
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ /* Notify the stack and free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 508e72c5f1c2..d665de8849a2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -80,7 +80,7 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
#define IXGBEVF_QUEUE_STATS_LEN ( \
(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
- (sizeof(struct ixgbe_stats) / sizeof(u64)))
+ (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 941c8e2c944e..93ab0b3ad393 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
- pdev->d3_delay = 150;
+ pdev->d3_delay = 200;
return 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4832223f1500..20de37a414fe 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1842,11 +1842,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
/* set GE2 TUNE */
regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
- /* GE1, Force 1000M/FD, FC ON */
- mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
-
- /* GE2, Force 1000M/FD, FC ON */
- mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+ /* Set linkdown as the default for each GMAC. Its own MCR would be set
+ * up with the more appropriate value when mtk_phy_link_adjust call is
+ * being invoked.
+ */
+ for (i = 0; i < MTK_MAC_COUNT; i++)
+ mtk_w32(eth, 0, MTK_MAC_MCR(i));
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index b04760a5034b..af2e6ea36eac 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -156,57 +156,63 @@ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
+ u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
if (priv->cee_config.pfc_state) {
int tc;
+ rx_ppp = prof->rx_ppp;
+ tx_ppp = prof->tx_ppp;
- priv->prof->rx_pause = 0;
- priv->prof->tx_pause = 0;
for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
u8 tc_mask = 1 << tc;
switch (priv->cee_config.dcb_pfc[tc]) {
case pfc_disabled:
- priv->prof->tx_ppp &= ~tc_mask;
- priv->prof->rx_ppp &= ~tc_mask;
+ tx_ppp &= ~tc_mask;
+ rx_ppp &= ~tc_mask;
break;
case pfc_enabled_full:
- priv->prof->tx_ppp |= tc_mask;
- priv->prof->rx_ppp |= tc_mask;
+ tx_ppp |= tc_mask;
+ rx_ppp |= tc_mask;
break;
case pfc_enabled_tx:
- priv->prof->tx_ppp |= tc_mask;
- priv->prof->rx_ppp &= ~tc_mask;
+ tx_ppp |= tc_mask;
+ rx_ppp &= ~tc_mask;
break;
case pfc_enabled_rx:
- priv->prof->tx_ppp &= ~tc_mask;
- priv->prof->rx_ppp |= tc_mask;
+ tx_ppp &= ~tc_mask;
+ rx_ppp |= tc_mask;
break;
default:
break;
}
}
- en_dbg(DRV, priv, "Set pfc on\n");
+ rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
+ tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
} else {
- priv->prof->rx_pause = 1;
- priv->prof->tx_pause = 1;
- en_dbg(DRV, priv, "Set pfc off\n");
+ rx_ppp = 0;
+ tx_ppp = 0;
+ rx_pause = prof->rx_pause;
+ tx_pause = prof->tx_pause;
}
if (mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp)) {
+ tx_pause, tx_ppp, rx_pause, rx_ppp)) {
en_err(priv, "Failed setting pause params\n");
return 1;
}
+ prof->tx_ppp = tx_ppp;
+ prof->rx_ppp = rx_ppp;
+ prof->tx_pause = tx_pause;
+ prof->rx_pause = rx_pause;
+
return 0;
}
@@ -310,6 +316,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
}
switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
case IEEE_8021QAZ_TSA_STRICT:
break;
case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +354,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
/* higher TC means higher priority => lower pg */
for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
+ pg[i] = MLX4_EN_TC_VENDOR;
+ tc_tx_bw[i] = MLX4_EN_BW_MAX;
+ break;
case IEEE_8021QAZ_TSA_STRICT:
pg[i] = num_strict++;
tc_tx_bw[i] = MLX4_EN_BW_MAX;
@@ -403,6 +414,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
+ u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
int err;
en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
@@ -411,23 +423,26 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
pfc->mbc,
pfc->delay);
- prof->rx_pause = !pfc->pfc_en;
- prof->tx_pause = !pfc->pfc_en;
- prof->rx_ppp = pfc->pfc_en;
- prof->tx_ppp = pfc->pfc_en;
+ rx_pause = prof->rx_pause && !pfc->pfc_en;
+ tx_pause = prof->tx_pause && !pfc->pfc_en;
+ rx_ppp = pfc->pfc_en;
+ tx_ppp = pfc->pfc_en;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- prof->tx_pause,
- prof->tx_ppp,
- prof->rx_pause,
- prof->rx_ppp);
- if (err)
+ tx_pause, tx_ppp, rx_pause, rx_ppp);
+ if (err) {
en_err(priv, "Failed setting pause params\n");
- else
- mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
- prof->rx_ppp, prof->rx_pause,
- prof->tx_ppp, prof->tx_pause);
+ return err;
+ }
+
+ mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+ rx_ppp, rx_pause, tx_ppp, tx_pause);
+
+ prof->tx_ppp = tx_ppp;
+ prof->rx_ppp = rx_ppp;
+ prof->rx_pause = rx_pause;
+ prof->tx_pause = tx_pause;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bdda17d2ea0f..9a4c4f8281bd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -970,6 +970,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
if (!coal->tx_max_coalesced_frames_irq)
return -EINVAL;
+ if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
+ netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
+ __func__, MLX4_EN_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
+ coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
+ netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
+ __func__, MLX4_EN_MAX_COAL_PKTS);
+ return -ERANGE;
+ }
+
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
@@ -1003,27 +1019,32 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
int err;
if (pause->autoneg)
return -EINVAL;
- priv->prof->tx_pause = pause->tx_pause != 0;
- priv->prof->rx_pause = pause->rx_pause != 0;
+ tx_pause = !!(pause->tx_pause);
+ rx_pause = !!(pause->rx_pause);
+ rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
+ tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
- if (err)
- en_err(priv, "Failed setting pause params\n");
- else
- mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
- priv->prof->rx_ppp,
- priv->prof->rx_pause,
- priv->prof->tx_ppp,
- priv->prof->tx_pause);
+ tx_pause, tx_ppp, rx_pause, rx_ppp);
+ if (err) {
+ en_err(priv, "Failed setting pause params, err = %d\n", err);
+ return err;
+ }
+
+ mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+ rx_ppp, rx_pause, tx_ppp, tx_pause);
+
+ priv->prof->tx_pause = tx_pause;
+ priv->prof->rx_pause = rx_pause;
+ priv->prof->tx_ppp = tx_ppp;
+ priv->prof->rx_ppp = rx_ppp;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index bf7628db098a..22c3fdd5482a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -163,9 +163,9 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->udp_rss = 0;
}
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
- params->prof[i].rx_pause = 1;
+ params->prof[i].rx_pause = !(pfcrx || pfctx);
params->prof[i].rx_ppp = pfcrx;
- params->prof[i].tx_pause = 1;
+ params->prof[i].tx_pause = !(pfcrx || pfctx);
params->prof[i].tx_ppp = pfctx;
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d223e7cb68ba..0160c93de6d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3125,6 +3125,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
+ u8 prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+ priv->ets.prio_tc[prio] = prio;
+ priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
+ }
+
priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
priv->flags |= MLX4_EN_DCB_ENABLED;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1a670b681555..0710b3677464 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
#include <linux/export.h>
#include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+ if (!mlx4_qp_lookup(dev, rule->qpn)) {
+ mlx4_err_rule(dev, "QP doesn't exist\n", rule);
+ ret = -EINVAL;
+ goto out;
+ }
+
trans_rule_ctrl_to_hw(rule, mailbox->buf);
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
list_for_each_entry(cur, &rule->list, list) {
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
- if (ret < 0) {
- mlx4_free_cmd_mailbox(dev, mailbox);
- return ret;
- }
+ if (ret < 0)
+ goto out;
+
size += ret;
}
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
}
}
+out:
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index df0f39611c5e..247d340be743 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -141,6 +141,9 @@ enum {
#define MLX4_EN_TX_COAL_PKTS 16
#define MLX4_EN_TX_COAL_TIME 0x10
+#define MLX4_EN_MAX_COAL_PKTS U16_MAX
+#define MLX4_EN_MAX_COAL_TIME U16_MAX
+
#define MLX4_EN_RX_RATE_LOW 400000
#define MLX4_EN_RX_COAL_TIME_LOW 0
#define MLX4_EN_RX_RATE_HIGH 450000
@@ -472,6 +475,7 @@ struct mlx4_en_frag_info {
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
+#define MLX4_EN_TC_VENDOR 0
#define MLX4_EN_TC_ETS 7
enum dcb_pfc_type {
@@ -542,8 +546,8 @@ struct mlx4_en_priv {
u16 rx_usecs_low;
u32 pkt_rate_high;
u16 rx_usecs_high;
- u16 sample_interval;
- u16 adaptive_rx_coal;
+ u32 sample_interval;
+ u32 adaptive_rx_coal;
u32 msg_enable;
u32 loopback_ok;
u32 validate_loopback;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 6143113a7fef..474ff36b9755 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -387,6 +387,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
__mlx4_qp_free_icm(dev, qpn);
}
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ struct mlx4_qp *qp;
+
+ spin_lock(&qp_table->lock);
+
+ qp = __mlx4_qp_lookup(dev, qpn);
+
+ spin_unlock(&qp_table->lock);
+ return qp;
+}
+
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -474,6 +487,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
}
if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
+ mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
cmd->qp_context.qos_vport = params->qos_vport;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1822382212ee..d6b06bef1b69 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5048,6 +5048,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_FS_RULE]);
list_del(&fs_rule->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ kfree(fs_rule->mirr_mbox);
kfree(fs_rule);
state = 0;
break;
@@ -5214,6 +5215,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
+static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
+ struct mlx4_vf_immed_vlan_work *work)
+{
+ ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
+ ctx->qp_context.qos_vport = work->qos_vport;
+}
+
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
struct mlx4_vf_immed_vlan_work *work =
@@ -5328,11 +5336,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
qp->sched_queue & 0xC7;
upd_context->qp_context.pri_path.sched_queue |=
((work->qos & 0x7) << 3);
- upd_context->qp_mask |=
- cpu_to_be64(1ULL <<
- MLX4_UPD_QP_MASK_QOS_VPP);
- upd_context->qp_context.qos_vport =
- work->qos_vport;
+
+ if (dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_QOS_VPP)
+ update_qos_vpp(upd_context, work);
}
err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 38981db43bc3..2d235e8433be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2741,6 +2741,9 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
mutex_unlock(&priv->state_lock);
+ if (mlx5e_vxlan_allowed(priv->mdev))
+ udp_tunnel_get_rx_info(netdev);
+
return err;
}
@@ -3785,13 +3788,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (netdev->reg_state != NETREG_REGISTERED)
return;
- /* Device already registered: sync netdev system state */
- if (mlx5e_vxlan_allowed(mdev)) {
- rtnl_lock();
- udp_tunnel_get_rx_info(netdev);
- rtnl_unlock();
- }
-
queue_work(priv->wq, &priv->set_rx_mode_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index a8966e6dbe1b..5d6eab19a9d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1924,26 +1924,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
memset(vf_stats, 0, sizeof(*vf_stats));
vf_stats->rx_packets =
MLX5_GET_CTR(out, received_eth_unicast.packets) +
+ MLX5_GET_CTR(out, received_ib_unicast.packets) +
MLX5_GET_CTR(out, received_eth_multicast.packets) +
+ MLX5_GET_CTR(out, received_ib_multicast.packets) +
MLX5_GET_CTR(out, received_eth_broadcast.packets);
vf_stats->rx_bytes =
MLX5_GET_CTR(out, received_eth_unicast.octets) +
+ MLX5_GET_CTR(out, received_ib_unicast.octets) +
MLX5_GET_CTR(out, received_eth_multicast.octets) +
+ MLX5_GET_CTR(out, received_ib_multicast.octets) +
MLX5_GET_CTR(out, received_eth_broadcast.octets);
vf_stats->tx_packets =
MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+ MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
vf_stats->tx_bytes =
MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+ MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
vf_stats->multicast =
- MLX5_GET_CTR(out, received_eth_multicast.packets);
+ MLX5_GET_CTR(out, received_eth_multicast.packets) +
+ MLX5_GET_CTR(out, received_ib_multicast.packets);
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 331a6ca4856d..5f3402ba9916 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -153,6 +153,7 @@ static void del_rule(struct fs_node *node);
static void del_flow_table(struct fs_node *node);
static void del_flow_group(struct fs_node *node);
static void del_fte(struct fs_node *node);
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static void tree_init_node(struct fs_node *node,
unsigned int refcount,
@@ -1690,24 +1691,28 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
static int init_root_ns(struct mlx5_flow_steering *steering)
{
+ int err;
steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
if (!steering->root_ns)
- goto cleanup;
+ return -ENOMEM;
- if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
- goto cleanup;
+ err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
+ if (err)
+ goto out_err;
set_prio_attrs(steering->root_ns);
- if (create_anchor_flow_table(steering))
- goto cleanup;
+ err = create_anchor_flow_table(steering);
+ if (err)
+ goto out_err;
return 0;
-cleanup:
- mlx5_cleanup_fs(steering->dev);
- return -ENOMEM;
+out_err:
+ cleanup_root_ns(steering->root_ns);
+ steering->root_ns = NULL;
+ return err;
}
static void clean_tree(struct fs_node *node)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 981cd1d84a5b..3c183b8c083a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -548,7 +548,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
- int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@@ -558,18 +557,11 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
priv->irq_info[i].mask);
- err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
- if (err) {
- mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
- irq);
- goto err_clear_mask;
- }
+ if (IS_ENABLED(CONFIG_SMP) &&
+ irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
return 0;
-
-err_clear_mask:
- free_cpumask_var(priv->irq_info[i].mask);
- return err;
}
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 1e2c8eca3af1..60e1edcbe573 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -809,6 +809,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool dynamic)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -818,9 +819,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, action, local_port);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- kfree(sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+out:
+ kfree(sfd_pl);
return err;
}
@@ -845,6 +853,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
bool adding, bool dynamic)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -855,9 +864,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
lag_vid, lag_id);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- kfree(sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+out:
+ kfree(sfd_pl);
return err;
}
@@ -891,6 +907,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
u16 fid, u16 mid, bool adding)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -900,7 +917,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
MLXSW_REG_SFD_REC_ACTION_NOP, mid);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
kfree(sfd_pl);
return err;
}
@@ -1423,8 +1448,7 @@ do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
adding, true);
if (err) {
- if (net_ratelimit())
- netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
return;
}
@@ -1484,8 +1508,7 @@ do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true);
if (err) {
- if (net_ratelimit())
- netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
return;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 4ca82bd8c4f0..eee6e59e6cf3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -854,6 +854,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
netdev_tx_sent_queue(nd_q, txbuf->real_len);
+ skb_tx_timestamp(skb);
+
tx_ring->wr_p += nr_frags + 1;
if (nfp_net_tx_ring_should_stop(tx_ring))
nfp_net_tx_ring_stop(nd_q, tx_ring);
@@ -866,8 +868,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
tx_ring->wr_ptr_add = 0;
}
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
err_unmap:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index b8d5270359cd..e30676515529 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
cmd.req.arg3 = 0;
if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
- netxen_issue_cmd(adapter, &cmd);
+ rcode = netxen_issue_cmd(adapter, &cmd);
if (rcode != NX_RCODE_SUCCESS)
return -EIO;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index ed014bdbbabd..457e30427535 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -271,16 +271,34 @@ struct qed_tm_iids {
u32 per_vf_tids;
};
-static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
+static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_mngr *p_mngr,
struct qed_tm_iids *iids)
{
- u32 i, j;
-
- for (i = 0; i < MAX_CONN_TYPES; i++) {
+ bool tm_vf_required = false;
+ bool tm_required = false;
+ int i, j;
+
+ /* Timers is a special case -> we don't count how many cids require
+ * timers but what's the max cid that will be used by the timer block.
+ * therefore we traverse in reverse order, and once we hit a protocol
+ * that requires the timers memory, we'll sum all the protocols up
+ * to that one.
+ */
+ for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
- if (tm_cid_proto(i)) {
+ if (tm_cid_proto(i) || tm_required) {
+ if (p_cfg->cid_count)
+ tm_required = true;
+
iids->pf_cids += p_cfg->cid_count;
+ }
+
+ if (tm_cid_proto(i) || tm_vf_required) {
+ if (p_cfg->cids_per_vf)
+ tm_vf_required = true;
+
iids->per_vf_cids += p_cfg->cids_per_vf;
}
}
@@ -696,7 +714,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* TM PF */
p_cli = &p_mngr->clients[ILT_CLI_TM];
- qed_cxt_tm_iids(p_mngr, &tm_iids);
+ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
total = tm_iids.pf_cids + tm_iids.pf_tids_total;
if (total) {
p_blk = &p_cli->pf_blks[0];
@@ -1591,7 +1609,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
u8 i;
memset(&tm_iids, 0, sizeof(tm_iids));
- qed_cxt_tm_iids(p_mngr, &tm_iids);
+ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
/* @@@TBD No pre-scan for now */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index afe5e57d9acb..d02313770fc2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -850,7 +850,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
NULL) +
qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
NULL);
- norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE);
min_addr_reg1 = norm_regsize / 4096;
pwm_regsize = db_bar_size - norm_regsize;
@@ -1628,6 +1628,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
}
+ p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
+ link->speed.autoneg;
+
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
link->pause.autoneg = !!(link_temp &
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 333c7442e48a..0b949c6d83fc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -711,7 +711,8 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
- if (!IS_ENABLED(CONFIG_QED_RDMA))
+ if (!IS_ENABLED(CONFIG_QED_RDMA) ||
+ QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
return 0;
for_each_hwfn(cdev, i)
@@ -1239,7 +1240,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
/* TODO - at the moment assume supported and advertised speed equal */
if_link->supported_caps = QED_LM_FIBRE_BIT;
- if (params.speed.autoneg)
+ if (link_caps.default_speed_autoneg)
if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
(params.pause.forced_rx && params.pause.forced_tx))
@@ -1249,6 +1250,10 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->supported_caps |= QED_LM_Pause_BIT;
if_link->advertised_caps = if_link->supported_caps;
+ if (params.speed.autoneg)
+ if_link->advertised_caps |= QED_LM_Autoneg_BIT;
+ else
+ if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index dff520ed069b..7b7a84d2c839 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -35,6 +35,7 @@ struct qed_mcp_link_params {
struct qed_mcp_link_capabilities {
u32 speed_capabilities;
+ bool default_speed_autoneg;
};
struct qed_mcp_link_state {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index d2d6621fe0e5..48bc5c151336 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3573,6 +3573,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
+ struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
struct qed_mcp_link_capabilities caps;
struct qed_mcp_link_params params;
struct qed_mcp_link_state link;
@@ -3589,9 +3590,15 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
if (!vf_info)
continue;
- memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+ /* Only hwfn0 is actually interested in the link speed.
+ * But since only it would receive an MFW indication of link,
+ * need to take configuration from it - otherwise things like
+ * rate limiting for hwfn1 VF would not work.
+ */
+ memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
+ sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
sizeof(caps));
/* Modify link according to the VF's configured link state */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index abf5bf11f865..0645124a887b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -204,7 +204,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
/* copy acquire response from buffer to p_hwfn */
memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 509b596cf1e8..bd1ec70fb736 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
}
return -EIO;
}
- usleep_range(1000, 1500);
+ udelay(1200);
}
if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index d7107055ec60..2f656f395f39 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -128,6 +128,8 @@ static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
return 0;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index be258d90de9e..e3223f2fe2ff 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.imageSize =
sizeof(struct ql_mpi_coredump);
- memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.idString));
/* Get generic NIC reg dump */
@@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.imageSize =
sizeof(struct ql_reg_dump);
- memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.idString));
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 6e2add979471..8bbb55f31909 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca)
/* Allocate rx SKB if we don't have one available. */
if (!qca->rx_skb) {
- qca->rx_skb = netdev_alloc_skb(net_dev,
- net_dev->mtu + VLAN_ETH_HLEN);
+ qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
+ net_dev->mtu +
+ VLAN_ETH_HLEN);
if (!qca->rx_skb) {
netdev_dbg(net_dev, "out of RX resources\n");
qca->stats.out_of_mem++;
@@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca)
qca->rx_skb, qca->rx_skb->dev);
qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_rx_ni(qca->rx_skb);
- qca->rx_skb = netdev_alloc_skb(net_dev,
+ qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
net_dev->mtu + VLAN_ETH_HLEN);
if (!qca->rx_skb) {
netdev_dbg(net_dev, "out of RX resources\n");
@@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev)
if (!qca->rx_buffer)
return -ENOBUFS;
- qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN);
+ qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
+ VLAN_ETH_HLEN);
if (!qca->rx_skb) {
kfree(qca->rx_buffer);
netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 18e68c91e651..59b932db0d42 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4861,6 +4861,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
rtl_generic_op(tp, tp->pll_power_ops.up);
+
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
@@ -8446,12 +8449,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_msi_5;
}
+ pci_set_drvdata(pdev, dev);
+
rc = register_netdev(dev);
if (rc < 0)
goto err_out_cnt_6;
- pci_set_drvdata(pdev, dev);
-
netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
(u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b6816ae00b7a..c8fd99b3ca29 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3133,7 +3133,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* MDIO bus init */
ret = sh_mdio_init(mdp, pd);
if (ret) {
- dev_err(&ndev->dev, "failed to initialise MDIO\n");
+ dev_err(&pdev->dev, "failed to initialise MDIO\n");
goto out_release;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 10d6059b2f26..f4074e25fb71 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -38,6 +38,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
{
u32 value = readl(ioaddr + PTP_TCR);
unsigned long data;
+ u32 reg_value;
/* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
* formula = (1/ptp_clock) * 1000000000
@@ -54,10 +55,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
data &= PTP_SSIR_SSINC_MASK;
+ reg_value = data;
if (gmac4)
- data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+ reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
- writel(data, ioaddr + PTP_SSIR);
+ writel(reg_value, ioaddr + PTP_SSIR);
return data;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 98bbb91336e4..c212d1dd8bfd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -478,7 +478,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* PTP v1, UDP, any kind of event packet */
config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* take time stamp for all event messages */
- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ if (priv->plat->has_gmac4)
+ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+ else
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -510,7 +513,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ if (priv->plat->has_gmac4)
+ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+ else
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -544,7 +550,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ if (priv->plat->has_gmac4)
+ snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+ else
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index c06938c47af5..174777cd888e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -63,7 +63,8 @@
/* Enable Snapshot for Messages Relevant to Master */
#define PTP_TCR_TSMSTRENA BIT(15)
/* Select PTP packets for Taking Snapshots */
-#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
+#define PTP_TCR_SNAPTYPSEL_1 BIT(16)
+#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
/* Enable MAC address for PTP Frame Filtering */
#define PTP_TCR_TSENMACADDR BIT(18)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a2371aa14a49..e45e2f14fb94 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3442,7 +3442,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
len = (val & RCR_ENTRY_L2_LEN) >>
RCR_ENTRY_L2_LEN_SHIFT;
- len -= ETH_FCS_LEN;
+ append_size = len + ETH_HLEN + ETH_FCS_LEN;
addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
@@ -3452,7 +3452,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
RCR_ENTRY_PKTBUFSZ_SHIFT];
off = addr & ~PAGE_MASK;
- append_size = rcr_size;
if (num_rcr == 1) {
int ptype;
@@ -3465,7 +3464,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
else
skb_checksum_none_assert(skb);
} else if (!(val & RCR_ENTRY_MULTI))
- append_size = len - skb->len;
+ append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 3f1971d485f3..d7cb205fe7e2 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -124,7 +124,7 @@ do { \
#define RX_PRIORITY_MAPPING 0x76543210
#define TX_PRIORITY_MAPPING 0x33221100
-#define CPDMA_TX_PRIORITY_MAP 0x01234567
+#define CPDMA_TX_PRIORITY_MAP 0x76543210
#define CPSW_VLAN_AWARE BIT(1)
#define CPSW_ALE_VLAN_AWARE 1
@@ -282,6 +282,10 @@ struct cpsw_ss_regs {
/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
+#define CPSW_MAX_BLKS_TX 15
+#define CPSW_MAX_BLKS_TX_SHIFT 4
+#define CPSW_MAX_BLKS_RX 5
+
struct cpsw_host_regs {
u32 max_blks;
u32 blk_cnt;
@@ -901,7 +905,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
mac_control |= BIT(15);
- else if (phy->speed == 10)
+ /* in band mode only works in 10Mbps RGMII mode */
+ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
mac_control |= BIT(18); /* In Band mode */
if (priv->rx_pause)
@@ -1136,6 +1141,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN |
ALE_SECURE, slave->port_vlan);
+ cpsw_ale_control_set(cpsw->ale, slave_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -1159,11 +1166,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
switch (cpsw->version) {
case CPSW_VERSION_1:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting fullduplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
case CPSW_VERSION_4:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting fullduplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
break;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 3c1f89ab0110..92ad43e53c72 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -209,6 +209,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
struct genevehdr *gnvh = geneve_hdr(skb);
struct metadata_dst *tun_dst = NULL;
struct pcpu_sw_netstats *stats;
+ unsigned int len;
int err = 0;
void *oiph;
@@ -222,8 +223,10 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
vni_to_tunnel_id(gnvh->vni),
gnvh->opt_len * 4);
- if (!tun_dst)
+ if (!tun_dst) {
+ geneve->dev->stats.rx_dropped++;
goto drop;
+ }
/* Update tunnel dst according to Geneve options. */
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
gnvh->options, gnvh->opt_len * 4);
@@ -231,8 +234,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
/* Drop packets w/ critical options,
* since we don't support any...
*/
- if (gnvh->critical)
+ if (gnvh->critical) {
+ geneve->dev->stats.rx_frame_errors++;
+ geneve->dev->stats.rx_errors++;
goto drop;
+ }
}
skb_reset_mac_header(skb);
@@ -243,8 +249,10 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
skb_dst_set(skb, &tun_dst->dst);
/* Ignore packet loops (and multicast echo) */
- if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+ if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
+ geneve->dev->stats.rx_errors++;
goto drop;
+ }
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
@@ -276,13 +284,15 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
}
}
- stats = this_cpu_ptr(geneve->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
-
- gro_cells_receive(&geneve->gro_cells, skb);
+ len = skb->len;
+ err = gro_cells_receive(&geneve->gro_cells, skb);
+ if (likely(err == NET_RX_SUCCESS)) {
+ stats = this_cpu_ptr(geneve->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ }
return;
drop:
/* Consume bad packet */
@@ -332,7 +342,7 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
struct geneve_sock *gs;
int opts_len;
- /* Need Geneve and inner Ethernet header to be present */
+ /* Need UDP and Geneve header to be present */
if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
goto drop;
@@ -355,8 +365,10 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
opts_len = geneveh->opt_len * 4;
if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
htons(ETH_P_TEB),
- !net_eq(geneve->net, dev_net(geneve->dev))))
+ !net_eq(geneve->net, dev_net(geneve->dev)))) {
+ geneve->dev->stats.rx_dropped++;
goto drop;
+ }
geneve_rx(geneve, gs, skb);
return 0;
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 4bad0b894e9c..27160d1870e1 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (s->par.bitrate <= 0)
+ return -EINVAL;
if (bi.data.calibrate > INT_MAX / s->par.bitrate)
return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index c2ac39a940f7..14f58b60d1b5 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -151,6 +151,13 @@ static void netvsc_destroy_buf(struct hv_device *device)
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
VM_PKT_DATA_INBAND, 0);
+ /* If the failure is because the channel is rescinded;
+ * ignore the failure since we cannot send on a rescinded
+ * channel. This would allow us to properly cleanup
+ * even when the channel is rescinded.
+ */
+ if (device->channel->rescind)
+ ret = 0;
/*
* If we failed here, we might as well return and
* have a leak rather than continue and a bugchk
@@ -211,6 +218,15 @@ static void netvsc_destroy_buf(struct hv_device *device)
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
VM_PKT_DATA_INBAND, 0);
+
+ /* If the failure is because the channel is rescinded;
+ * ignore the failure since we cannot send on a rescinded
+ * channel. This would allow us to properly cleanup
+ * even when the channel is rescinded.
+ */
+ if (device->channel->rescind)
+ ret = 0;
+
/* If we failed here, we might as well return and
* have a leak rather than continue and a bugchk
*/
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index f355df7cf84a..1b980f12663a 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -888,7 +888,7 @@ static struct ieee802154_ops adf7242_ops = {
.set_cca_ed_level = adf7242_set_cca_ed_level,
};
-static void adf7242_debug(u8 irq1)
+static void adf7242_debug(struct adf7242_local *lp, u8 irq1)
{
#ifdef DEBUG
u8 stat;
@@ -932,7 +932,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n",
__func__, irq1);
- adf7242_debug(irq1);
+ adf7242_debug(lp, irq1);
xmit = test_bit(FLAG_XMIT, &lp->flags);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 627eb825eb74..c747ab652665 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -299,6 +299,10 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
success = true;
} else {
+ if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+ ipvlan->phy_dev->dev_addr))
+ skb->pkt_type = PACKET_OTHERHOST;
+
ret = RX_HANDLER_ANOTHER;
success = true;
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 2caac0c37059..365a48cfcbbf 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -742,7 +742,12 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_fill_iv(iv, secy->sci, pn);
sg_init_table(sg, ret);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(ret < 0)) {
+ macsec_txsa_put(tx_sa);
+ kfree_skb(skb);
+ return ERR_PTR(ret);
+ }
if (tx_sc->encrypt) {
int len = skb->len - macsec_hdr_len(sci_present) -
@@ -949,7 +954,11 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
sg_init_table(sg, ret);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(ret < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(ret);
+ }
if (hdr->tci_an & MACSEC_TCI_E) {
/* confidentiality: ethernet + macsec header
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6d55049cd3dc..e8ad4d060da7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1377,9 +1377,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
return 0;
unregister_netdev:
+ /* macvlan_uninit would free the macvlan port */
unregister_netdevice(dev);
+ return err;
destroy_macvlan_port:
- if (create)
+ /* the macvlan port may be freed by macvlan_uninit when fail to register.
+ * so we destroy the macvlan port only when it's valid.
+ */
+ if (create && macvlan_port_get_rtnl(dev))
macvlan_port_destroy(port->dev);
return err;
}
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 963838d4fac1..599ce24c514f 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -122,10 +122,9 @@ int mdio_mux_init(struct device *dev,
pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
if (pb == NULL) {
ret_val = -ENOMEM;
- goto err_parent_bus;
+ goto err_pb_kz;
}
-
pb->switch_data = data;
pb->switch_fn = switch_fn;
pb->current_child = -1;
@@ -154,6 +153,7 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus = mdiobus_alloc();
if (!cb->mii_bus) {
ret_val = -ENOMEM;
+ devm_kfree(dev, cb);
of_node_put(child_bus_node);
break;
}
@@ -170,7 +170,6 @@ int mdio_mux_init(struct device *dev,
mdiobus_free(cb->mii_bus);
devm_kfree(dev, cb);
} else {
- of_node_get(child_bus_node);
cb->next = pb->children;
pb->children = cb;
}
@@ -181,9 +180,11 @@ int mdio_mux_init(struct device *dev,
return 0;
}
+ devm_kfree(dev, pb);
+err_pb_kz:
/* balance the reference of_mdio_find_bus() took */
- put_device(&pb->mii_bus->dev);
-
+ if (!mux_bus)
+ put_device(&parent_bus->dev);
err_parent_bus:
of_node_put(parent_bus_node);
return ret_val;
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 135296508a7e..6425ce04d3f9 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
data->regulator = devm_regulator_get(&pdev->dev, "phy");
if (IS_ERR(data->regulator)) {
- if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_out_free_mdiobus;
+ }
dev_info(&pdev->dev, "no regulator found\n");
data->regulator = NULL;
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 92af182951be..20fbcc9c4687 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -197,8 +197,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
}
ret = xgene_enet_ecc_init(pdata);
- if (ret)
+ if (ret) {
+ if (pdata->dev->of_node)
+ clk_disable_unprepare(pdata->clk);
return ret;
+ }
xgene_gmac_reset(pdata);
return 0;
@@ -229,7 +232,7 @@ static int xgene_xfi_mdio_write(struct mii_bus *bus, int phy_id,
val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) |
SET_VAL(HSTMIIMWRDAT, data);
- xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, data);
+ xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val);
val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE);
xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val);
@@ -311,6 +314,30 @@ static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl,
}
#endif
+static const struct of_device_id xgene_mdio_of_match[] = {
+ {
+ .compatible = "apm,xgene-mdio-rgmii",
+ .data = (void *)XGENE_MDIO_RGMII
+ },
+ {
+ .compatible = "apm,xgene-mdio-xfi",
+ .data = (void *)XGENE_MDIO_XFI
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_mdio_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_mdio_acpi_match[] = {
+ { "APMC0D65", XGENE_MDIO_RGMII },
+ { "APMC0D66", XGENE_MDIO_XFI },
+ { }
+};
+
+MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match);
+#endif
+
+
static int xgene_mdio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -364,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
return ret;
mdio_bus = mdiobus_alloc();
- if (!mdio_bus)
- return -ENOMEM;
+ if (!mdio_bus) {
+ ret = -ENOMEM;
+ goto out_clk;
+ }
mdio_bus->name = "APM X-Gene MDIO bus";
@@ -394,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
mdio_bus->phy_mask = ~0;
ret = mdiobus_register(mdio_bus);
if (ret)
- goto out;
+ goto out_mdiobus;
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
acpi_register_phy, NULL, mdio_bus, NULL);
@@ -402,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
}
if (ret)
- goto out;
+ goto out_mdiobus;
pdata->mdio_bus = mdio_bus;
xgene_mdio_status = true;
return 0;
-out:
+out_mdiobus:
mdiobus_free(mdio_bus);
+out_clk:
+ if (dev->of_node)
+ clk_disable_unprepare(pdata->clk);
+
return ret;
}
@@ -430,32 +463,6 @@ static int xgene_mdio_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id xgene_mdio_of_match[] = {
- {
- .compatible = "apm,xgene-mdio-rgmii",
- .data = (void *)XGENE_MDIO_RGMII
- },
- {
- .compatible = "apm,xgene-mdio-xfi",
- .data = (void *)XGENE_MDIO_XFI
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, xgene_mdio_of_match);
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_mdio_acpi_match[] = {
- { "APMC0D65", XGENE_MDIO_RGMII },
- { "APMC0D66", XGENE_MDIO_XFI },
- { }
-};
-
-MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match);
-#endif
-
static struct platform_driver xgene_mdio_driver = {
.driver = {
.name = "xgene-mdio",
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
index 354241b53c1d..594a11d42401 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/drivers/net/phy/mdio-xgene.h
@@ -132,10 +132,6 @@ static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
#define GET_BIT(field, src) \
xgene_enet_get_field_value(field ## _POS, 1, src)
-static const struct of_device_id xgene_mdio_of_match[];
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_mdio_acpi_match[];
-#endif
int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 6e12401b5102..4d217649c8b1 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -148,6 +148,12 @@ static inline int phy_aneg_done(struct phy_device *phydev)
if (phydev->drv->aneg_done)
return phydev->drv->aneg_done(phydev);
+ /* Avoid genphy_aneg_done() if the Clause 45 PHY does not
+ * implement Clause 22 registers
+ */
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+ return -EINVAL;
+
return genphy_aneg_done(phydev);
}
@@ -925,7 +931,7 @@ void phy_start(struct phy_device *phydev)
break;
case PHY_HALTED:
/* make sure interrupts are re-enabled for the PHY */
- if (phydev->irq != PHY_POLL) {
+ if (phy_interrupt_is_valid(phydev)) {
err = phy_enable_interrupts(phydev);
if (err < 0)
break;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fc4c2ccc3d22..1e4969d90f1a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -255,7 +255,7 @@ struct ppp_net {
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
-static void ppp_xmit_process(struct ppp *ppp);
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
@@ -511,13 +511,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
goto out;
}
- skb_queue_tail(&pf->xq, skb);
-
switch (pf->kind) {
case INTERFACE:
- ppp_xmit_process(PF_TO_PPP(pf));
+ ppp_xmit_process(PF_TO_PPP(pf), skb);
break;
case CHANNEL:
+ skb_queue_tail(&pf->xq, skb);
ppp_channel_push(PF_TO_CHANNEL(pf));
break;
}
@@ -1261,8 +1260,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
put_unaligned_be16(proto, pp);
skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
- skb_queue_tail(&ppp->file.xq, skb);
- ppp_xmit_process(ppp);
+ ppp_xmit_process(ppp, skb);
+
return NETDEV_TX_OK;
outf:
@@ -1416,13 +1415,14 @@ static void ppp_setup(struct net_device *dev)
*/
/* Called to do any work queued up on the transmit side that can now be done */
-static void __ppp_xmit_process(struct ppp *ppp)
+static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
- struct sk_buff *skb;
-
ppp_xmit_lock(ppp);
if (!ppp->closing) {
ppp_push(ppp);
+
+ if (skb)
+ skb_queue_tail(&ppp->file.xq, skb);
while (!ppp->xmit_pending &&
(skb = skb_dequeue(&ppp->file.xq)))
ppp_send_frame(ppp, skb);
@@ -1436,7 +1436,7 @@ static void __ppp_xmit_process(struct ppp *ppp)
ppp_xmit_unlock(ppp);
}
-static void ppp_xmit_process(struct ppp *ppp)
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
local_bh_disable();
@@ -1444,7 +1444,7 @@ static void ppp_xmit_process(struct ppp *ppp)
goto err;
(*this_cpu_ptr(ppp->xmit_recursion))++;
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, skb);
(*this_cpu_ptr(ppp->xmit_recursion))--;
local_bh_enable();
@@ -1454,6 +1454,8 @@ static void ppp_xmit_process(struct ppp *ppp)
err:
local_bh_enable();
+ kfree_skb(skb);
+
if (net_ratelimit())
netdev_err(ppp->dev, "recursion detected\n");
}
@@ -1938,7 +1940,7 @@ static void __ppp_channel_push(struct channel *pch)
if (skb_queue_empty(&pch->file.xq)) {
ppp = pch->ppp;
if (ppp)
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, NULL);
}
}
@@ -3157,6 +3159,15 @@ ppp_connect_channel(struct channel *pch, int unit)
goto outl;
ppp_lock(ppp);
+ spin_lock_bh(&pch->downl);
+ if (!pch->chan) {
+ /* Don't connect unregistered channels */
+ spin_unlock_bh(&pch->downl);
+ ppp_unlock(ppp);
+ ret = -ENOTCONN;
+ goto outl;
+ }
+ spin_unlock_bh(&pch->downl);
if (pch->file.hdrlen > ppp->file.hdrlen)
ppp->file.hdrlen = pch->file.hdrlen;
hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index dc36c2ec1d10..fa2c7bd638be 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppox))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1951b1085cb8..3045c9662ed6 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -465,7 +465,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu = dst_mtu(&rt->dst);
if (!po->chan.mtu)
po->chan.mtu = PPP_MRU;
- ip_rt_put(rt);
po->chan.mtu -= PPTP_HEADER_OVERHEAD;
po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 27ed25252aac..cfd81eb1b532 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
if(x < 0 || x > comp->rslot_limit)
goto bad;
+ /* Check if the cstate is initialized */
+ if (!comp->rstate[x].initialized)
+ goto bad;
+
comp->flags &=~ SLF_TOSS;
comp->recv_current = x;
} else {
@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
if (cs->cs_tcp.doff > 5)
memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
+ cs->initialized = true;
/* Put headers back on packet
* Neither header checksum is recalculated
*/
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 26681707fc7a..36963685d42a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
+static bool __team_option_inst_tmp_find(const struct list_head *opts,
+ const struct team_option_inst *needle)
+{
+ struct team_option_inst *opt_inst;
+
+ list_for_each_entry(opt_inst, opts, tmp_list)
+ if (opt_inst == needle)
+ return true;
+ return false;
+}
+
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -1067,14 +1078,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int __team_port_enable_netpoll(struct team_port *port)
{
struct netpoll *np;
int err;
- if (!team->dev->npinfo)
- return 0;
-
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
@@ -1088,6 +1096,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
return err;
}
+static int team_port_enable_netpoll(struct team_port *port)
+{
+ if (!port->team->dev->npinfo)
+ return 0;
+
+ return __team_port_enable_netpoll(port);
+}
+
static void team_port_disable_netpoll(struct team_port *port)
{
struct netpoll *np = port->np;
@@ -1102,7 +1118,7 @@ static void team_port_disable_netpoll(struct team_port *port)
kfree(np);
}
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team_port *port)
{
return 0;
}
@@ -1203,11 +1219,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_dev_open;
}
- netif_addr_lock_bh(dev);
- dev_uc_sync_multiple(port_dev, dev);
- dev_mc_sync_multiple(port_dev, dev);
- netif_addr_unlock_bh(dev);
-
err = vlan_vids_add_by_dev(port_dev, dev);
if (err) {
netdev_err(dev, "Failed to add vlan ids to device %s\n",
@@ -1215,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
- err = team_port_enable_netpoll(team, port);
+ err = team_port_enable_netpoll(port);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);
@@ -1247,6 +1258,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_option_port_add;
}
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
+
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
team_port_enable(team, port);
@@ -1271,8 +1287,6 @@ err_enable_netpoll:
vlan_vids_del_by_dev(port_dev, dev);
err_vids_add:
- dev_uc_unsync(port_dev, dev);
- dev_mc_unsync(port_dev, dev);
dev_close(port_dev);
err_dev_open:
@@ -1910,7 +1924,7 @@ static int team_netpoll_setup(struct net_device *dev,
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port);
+ err = __team_port_enable_netpoll(port);
if (err) {
__team_netpoll_cleanup(team);
break;
@@ -2403,7 +2417,7 @@ send_done:
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}
@@ -2571,6 +2585,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
+
+ /* dumb/evil user-space can send us duplicate opt,
+ * keep only the last one
+ */
+ if (__team_option_inst_tmp_find(&opt_inst_list,
+ opt_inst))
+ continue;
+
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
@@ -2688,7 +2710,7 @@ send_done:
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 1fca0024f294..99424c87b464 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -530,6 +530,7 @@ static const struct driver_info wwan_info = {
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define LINKSYS_VENDOR_ID 0x13b1
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
@@ -719,6 +720,15 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+#endif
+
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
@@ -774,6 +784,12 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Cinterion AHS3 modem by GEMALTO */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
/* Telit modules */
USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index dc6d3b0a0be8..feb61eaffe32 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1118,6 +1118,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
u16 n = 0, index, ndplen;
u8 ready2send = 0;
u32 delayed_ndp_size;
+ size_t padding_count;
/* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated
* accordingly. Otherwise, we should check here.
@@ -1274,11 +1275,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* a ZLP after full sized NTBs.
*/
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
- skb_out->len > ctx->min_tx_pkt)
- memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
- ctx->tx_max - skb_out->len);
- else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
+ skb_out->len > ctx->min_tx_pkt) {
+ padding_count = ctx->tx_max - skb_out->len;
+ memset(skb_put(skb_out, padding_count), 0, padding_count);
+ } else if (skb_out->len < ctx->tx_max &&
+ (skb_out->len % dev->maxpacket) == 0) {
*skb_put(skb_out, 1) = 0; /* force short packet */
+ }
/* set final frame length */
nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index c53385a0052f..f5a96678494b 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -873,7 +873,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
offset += 0x100;
else
ret = -EINVAL;
- ret = lan78xx_read_raw_otp(dev, offset, length, data);
+ if (!ret)
+ ret = lan78xx_read_raw_otp(dev, offset, length, data);
}
return ret;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e1e5e8438457..3e893fe890a0 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -531,7 +531,7 @@ err:
static const struct driver_info qmi_wwan_info = {
.description = "WWAN/QMI device",
- .flags = FLAG_WWAN,
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
@@ -540,7 +540,7 @@ static const struct driver_info qmi_wwan_info = {
static const struct driver_info qmi_wwan_info_quirk_dtr = {
.description = "WWAN/QMI device",
- .flags = FLAG_WWAN,
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
@@ -803,8 +803,10 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
@@ -914,6 +916,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
@@ -1036,6 +1039,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
id->driver_info = (unsigned long)&qmi_wwan_info;
}
+ /* There are devices where the same interface number can be
+ * configured as different functions. We should only bind to
+ * vendor specific functions when matching on interface number
+ */
+ if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
+ desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
+ dev_dbg(&intf->dev,
+ "Rejecting interface number match for class %02x\n",
+ desc->bInterfaceClass);
+ return -ENODEV;
+ }
+
/* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index b2d7c7e32250..3cdfa2465e3f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -519,6 +519,7 @@ enum rtl8152_flags {
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define MCU_TYPE_PLA 0x0100
@@ -4506,6 +4507,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index fbc853e64531..ee7460ee3d05 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -425,6 +425,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (ifmp && (dev->ifindex != 0))
peer->ifindex = ifmp->ifi_index;
+ peer->gso_max_size = dev->gso_max_size;
+ peer->gso_max_segs = dev->gso_max_segs;
+
err = register_netdevice(peer);
put_net(net);
net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1568aedddfc9..472ed6df2221 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -529,7 +529,12 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
hdr = skb_vnet_hdr(skb);
sg_init_table(rq->sg, 2);
sg_set_buf(rq->sg, hdr, vi->hdr_len);
- skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+
+ err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+ if (unlikely(err < 0)) {
+ dev_kfree_skb(skb);
+ return err;
+ }
err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
if (err < 0)
@@ -831,7 +836,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
struct virtio_net_hdr_mrg_rxbuf *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
- unsigned num_sg;
+ int num_sg;
unsigned hdr_len = vi->hdr_len;
bool can_push;
@@ -858,11 +863,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
if (can_push) {
__skb_push(skb, hdr_len);
num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+ if (unlikely(num_sg < 0))
+ return num_sg;
/* Pull header back to avoid skew in tx bytes calculations. */
__skb_pull(skb, hdr_len);
} else {
sg_set_buf(sq->sg, hdr, hdr_len);
- num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+ if (unlikely(num_sg < 0))
+ return num_sg;
+ num_sg++;
}
return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 4afba17e2403..f809eed0343c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2962,6 +2962,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
/* we need to enable NAPI, otherwise dev_close will deadlock */
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
+ /*
+ * Need to clear the quiesce bit to ensure that vmxnet3_close
+ * can quiesce the device properly
+ */
+ clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
dev_close(adapter->netdev);
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 346e48698555..42c9480acdc7 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -585,13 +585,15 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
- if (!IS_ERR(neigh))
+ if (!IS_ERR(neigh)) {
ret = dst_neigh_output(dst, neigh, skb);
+ rcu_read_unlock_bh();
+ return ret;
+ }
rcu_read_unlock_bh();
err:
- if (unlikely(ret < 0))
- vrf_tx_error(skb->dev, skb);
+ vrf_tx_error(skb->dev, skb);
return ret;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 983e941bdf29..28afdf22b88f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -930,7 +930,7 @@ static bool vxlan_snoop(struct net_device *dev,
return false;
/* Don't migrate static entries, drop packets */
- if (f->state & NUD_NOARP)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
return true;
if (net_ratelimit())
@@ -2816,17 +2816,21 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
- bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
+ bool ipv6 = vxlan->flags & VXLAN_F_IPV6 || metadata;
+ bool ipv4 = !ipv6 || metadata;
int ret = 0;
RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
- if (ipv6 || metadata)
+ if (ipv6) {
ret = __vxlan_sock_add(vxlan, true);
+ if (ret < 0 && ret != -EAFNOSUPPORT)
+ ipv4 = false;
+ }
#endif
- if (!ret && (!ipv6 || metadata))
+ if (ipv4)
ret = __vxlan_sock_add(vxlan, false);
if (ret < 0)
vxlan_sock_release(vxlan);
@@ -2912,6 +2916,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
+ if (lowerdev) {
+ dev->gso_max_size = lowerdev->gso_max_size;
+ dev->gso_max_segs = lowerdev->gso_max_segs;
+ }
+
if (conf->mtu) {
err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
if (err)
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 65647533b401..a8bd68f252e9 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -137,7 +137,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
priv->tx_ring_size = TX_BD_RING_LEN;
/* Alloc Rx BD */
priv->rx_bd_base = dma_alloc_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
&priv->dma_rx_bd, GFP_KERNEL);
if (!priv->rx_bd_base) {
@@ -148,7 +148,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
/* Alloc Tx BD */
priv->tx_bd_base = dma_alloc_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
&priv->dma_tx_bd, GFP_KERNEL);
if (!priv->tx_bd_base) {
@@ -158,7 +158,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
}
/* Alloc parameter ram for ucc hdlc */
- priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram),
+ priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
ALIGNMENT_OF_UCC_HDLC_PRAM);
if (priv->ucc_pram_offset < 0) {
@@ -295,11 +295,11 @@ free_ucc_pram:
qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
dma_free_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
dma_free_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
ucc_fast_free(priv->uccf);
@@ -454,7 +454,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
struct net_device *dev = priv->ndev;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct qe_bd *bd;
u32 bd_status;
@@ -688,7 +688,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
if (priv->rx_bd_base) {
dma_free_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
priv->rx_bd_base, priv->dma_rx_bd);
priv->rx_bd_base = NULL;
@@ -697,7 +697,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
if (priv->tx_bd_base) {
dma_free_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
priv->tx_bd_base, priv->dma_tx_bd);
priv->tx_bd_base = NULL;
@@ -1002,7 +1002,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct ucc_hdlc_private *uhdlc_priv = NULL;
struct ucc_tdm_info *ut_info;
- struct ucc_tdm *utdm;
+ struct ucc_tdm *utdm = NULL;
struct resource res;
struct net_device *dev;
hdlc_device *hdlc;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 47fdb87d3567..8a9aced850be 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg)
ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
0, NULL);
proto->restart_counter--;
- } else
+ } else if (netif_carrier_ok(proto->dev))
+ ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+ 0, NULL);
+ else
ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
0, NULL);
break;
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index db363856e0b5..2b064998915f 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -347,6 +347,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
card->rambase == NULL) {
pr_err("ioremap() failed\n");
pc300_pci_remove_one(pdev);
+ return -ENOMEM;
}
/* PLX PCI 9050 workaround for local configuration register read bug */
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 7d3231acfb24..82bdec744055 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -83,6 +83,8 @@ enum bmi_cmd_id {
#define BMI_NVRAM_SEG_NAME_SZ 16
#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
+#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
+#define BMI_PARAM_FLASH_SECTION_ALL 0x10000
#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 0b4d79659884..041ef3be87b9 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1059,7 +1059,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
*/
BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 7b3017f55e3d..65ad7a130ca1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -652,7 +652,7 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
{
u32 result, address;
u8 board_id, chip_id;
- int ret;
+ int ret, bmi_board_id_param;
address = ar->hw_params.patch_load_addr;
@@ -676,8 +676,13 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
return ret;
}
- ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID,
- &result);
+ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+ bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID;
+ else
+ bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID;
+
+ ret = ath10k_bmi_execute(ar, address, bmi_board_id_param, &result);
if (ret) {
ath10k_err(ar, "could not execute otp for board id check: %d\n",
ret);
@@ -739,6 +744,11 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
return ret;
}
+ /* As of now pre-cal is valid for 10_4 variants */
+ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+ bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
+
ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
if (ret) {
ath10k_err(ar, "could not execute otp (%d)\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 82a4c67f3672..0dadc6044dba 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -624,17 +624,21 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- char buf[32];
+ char buf[32] = {0};
+ ssize_t rc;
int ret;
- simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
- /* make sure that buf is null terminated */
- buf[sizeof(buf) - 1] = 0;
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
/* drop the possible '\n' from the end */
- if (buf[count - 1] == '\n')
- buf[count - 1] = 0;
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
mutex_lock(&ar->conf_mutex);
@@ -1942,6 +1946,15 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
+ struct ath10k_vif *arvif;
+
+ /* Just check for the first vif alone, as all the vifs will be
+ * sharing the same channel and if the channel is disabled, all the
+ * vifs will share the same 'is_started' state.
+ */
+ arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
+ if (!arvif->is_started)
+ return -EINVAL;
ieee80211_radar_detected(ar->hw);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 17ab8efdac35..5aa5df24f4dc 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2505,7 +2505,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
}
break;
case WMI_VDEV_TYPE_STA:
- if (vif->bss_conf.qos)
+ if (sta->wme)
arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
break;
case WMI_VDEV_TYPE_IBSS:
@@ -5819,9 +5819,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps, err);
}
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
- changed & IEEE80211_RC_NSS_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
sta->addr);
err = ath10k_station_assoc(ar, arvif->vif, sta, true);
@@ -6054,6 +6053,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
+ if (sta->tdls) {
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
+ sta,
+ WMI_TDLS_PEER_STATE_TEARDOWN);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
+ sta->addr,
+ WMI_TDLS_PEER_STATE_TEARDOWN, ret);
+ }
+
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
@@ -7070,7 +7079,7 @@ ath10k_mac_update_rx_channel(struct ath10k *ar,
lockdep_assert_held(&ar->data_lock);
WARN_ON(ctx && vifs);
- WARN_ON(vifs && n_vifs != 1);
+ WARN_ON(vifs && !n_vifs);
/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
* on a linked list now. Doing a lookup peer -> vif -> chanctx for each
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 54df425bb0fc..e518b640aad0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3638,6 +3638,11 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
spin_lock_bh(&ar->data_lock);
ch = ar->rx_channel;
+
+ /* fetch target operating channel during channel change */
+ if (!ch)
+ ch = ar->tgt_oper_chan;
+
spin_unlock_bh(&ar->data_lock);
if (!ch) {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 1b243c899bef..9b8562ff6698 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -5017,7 +5017,8 @@ enum wmi_10_4_vdev_param {
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
-#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7)
#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 4f8d9ed04f5e..4a1054408f1a 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -939,7 +939,10 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
}
for (i = 0; i < eesize; ++i) {
- AR5K_EEPROM_READ(i, val);
+ if (!ath5k_hw_nvram_read(ah, i, &val)) {
+ ret = -EIO;
+ goto freebuf;
+ }
buf[i] = val;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index a35f78be8dec..acef4ec928c1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1603,6 +1603,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
int count = 50;
u32 reg, last_val;
+ /* Check if chip failed to wake up */
+ if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
+ return false;
+
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index f8506037736f..404fc306cb2a 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -254,8 +254,12 @@ bool ath_is_49ghz_allowed(u16 regdomain)
EXPORT_SYMBOL(ath_is_49ghz_allowed);
/* Frequency is one where radar detection is required */
-static bool ath_is_radar_freq(u16 center_freq)
+static bool ath_is_radar_freq(u16 center_freq,
+ struct ath_regulatory *reg)
+
{
+ if (reg->country_code == CTRY_INDIA)
+ return (center_freq >= 5500 && center_freq <= 5700);
return (center_freq >= 5260 && center_freq <= 5700);
}
@@ -306,7 +310,7 @@ __ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
struct ieee80211_channel *ch)
{
- if (ath_is_radar_freq(ch->center_freq) ||
+ if (ath_is_radar_freq(ch->center_freq, reg) ||
(ch->flags & IEEE80211_CHAN_RADAR))
return;
@@ -395,8 +399,9 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy,
}
}
-/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */
-static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
+/* Always apply Radar/DFS rules on freq range 5500 MHz - 5700 MHz */
+static void ath_reg_apply_radar_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg)
{
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
@@ -409,7 +414,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
- if (!ath_is_radar_freq(ch->center_freq))
+ if (!ath_is_radar_freq(ch->center_freq, reg))
continue;
/* We always enable radar detection/DFS on this
* frequency range. Additionally we also apply on
@@ -505,7 +510,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
/* We always apply this */
- ath_reg_apply_radar_flags(wiphy);
+ ath_reg_apply_radar_flags(wiphy, reg);
/*
* This would happen when we have sent a custom regulatory request
@@ -653,7 +658,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
}
wiphy_apply_custom_regulatory(wiphy, regd);
- ath_reg_apply_radar_flags(wiphy);
+ ath_reg_apply_radar_flags(wiphy, reg);
ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 24b07a0ce6f7..f8bce58d48cc 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -129,9 +129,15 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
u32 *d = dst;
const volatile u32 __iomem *s = src;
- /* size_t is unsigned, if (count%4 != 0) it will wrap */
- for (count += 4; count > 4; count -= 4)
+ for (; count >= 4; count -= 4)
*d++ = __raw_readl(s++);
+
+ if (unlikely(count)) {
+ /* count can be 1..3 */
+ u32 tmp = __raw_readl(s);
+
+ memcpy(d, &tmp, count);
+ }
}
void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
@@ -148,8 +154,16 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
volatile u32 __iomem *d = dst;
const u32 *s = src;
- for (count += 4; count > 4; count -= 4)
+ for (; count >= 4; count -= 4)
__raw_writel(*s++, d++);
+
+ if (unlikely(count)) {
+ /* count can be 1..3 */
+ u32 tmp = 0;
+
+ memcpy(&tmp, s, count);
+ __raw_writel(tmp, d);
+ }
}
void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index fae4f1285d08..94a356bbb6b9 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -501,16 +501,16 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
assoc_resp_ielen = 0;
}
- mutex_lock(&wil->mutex);
if (test_bit(wil_status_resetting, wil->status) ||
!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "status_resetting, cancel connect event, CID %d\n",
evt->cid);
- mutex_unlock(&wil->mutex);
/* no need for cleanup, wil_reset will do that */
return;
}
+ mutex_lock(&wil->mutex);
+
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (!test_bit(wil_status_fwconnecting, wil->status)) {
@@ -608,6 +608,13 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
wil->sinfo_gen++;
+ if (test_bit(wil_status_resetting, wil->status) ||
+ !test_bit(wil_status_fwready, wil->status)) {
+ wil_err(wil, "status_resetting, cancel disconnect event\n");
+ /* no need for cleanup, wil_reset will do that */
+ return;
+ }
+
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, evt->bssid, reason_code, true);
mutex_unlock(&wil->mutex);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 85d949e03f79..f78d91b69287 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
* @dev_addr: optional device address.
*
* P2P needs mac addresses for P2P device and interface. If no device
- * address it specified, these are derived from the primary net device, ie.
- * the permanent ethernet address of the device.
+ * address is specified, these are derived from a random ethernet
+ * address.
*/
static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
{
- struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
- bool local_admin = false;
+ bool random_addr = false;
- if (!dev_addr || is_zero_ether_addr(dev_addr)) {
- dev_addr = pri_ifp->mac_addr;
- local_admin = true;
- }
+ if (!dev_addr || is_zero_ether_addr(dev_addr))
+ random_addr = true;
- /* Generate the P2P Device Address. This consists of the device's
- * primary MAC address with the locally administered bit set.
+ /* Generate the P2P Device Address by obtaining a random ethernet
+ * address with the locally administered bit set.
*/
- memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
- if (local_admin)
- p2p->dev_addr[0] |= 0x02;
+ if (random_addr)
+ eth_random_addr(p2p->dev_addr);
+ else
+ memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
/* Generate the P2P Interface Address. If the discovery and connection
* BSSCFGs need to simultaneously co-exist, then this address must be
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index d4b73dedf89b..b35adbca7a5d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -79,8 +79,8 @@
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 17
#define IWL7265_UCODE_API_MIN 17
-#define IWL7265D_UCODE_API_MIN 17
-#define IWL3168_UCODE_API_MIN 20
+#define IWL7265D_UCODE_API_MIN 22
+#define IWL3168_UCODE_API_MIN 22
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 8d3e53fac1da..20d08ddb4388 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -74,8 +74,8 @@
#define IWL8265_UCODE_API_MAX 26
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 17
-#define IWL8265_UCODE_API_MIN 20
+#define IWL8000_UCODE_API_MIN 22
+#define IWL8265_UCODE_API_MIN 22
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
index ea1618525878..e267377edbe7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
@@ -65,12 +65,12 @@
#define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */
/* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET 0x800000
-#define IWL_A000_DCCM_LEN 0x18000
+#define IWL_A000_DCCM_OFFSET 0x800000 /* LMAC1 */
+#define IWL_A000_DCCM_LEN 0x10000 /* LMAC1 */
#define IWL_A000_DCCM2_OFFSET 0x880000
#define IWL_A000_DCCM2_LEN 0x8000
#define IWL_A000_SMEM_OFFSET 0x400000
-#define IWL_A000_SMEM_LEN 0x68000
+#define IWL_A000_SMEM_LEN 0xD0000
#define IWL_A000_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_A000_MODULE_FIRMWARE(api) \
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
index 88f260db3744..68412ff2112e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
@@ -76,8 +76,8 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
}
IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
- struct iwl_rx_packet *pkt)
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt)
{
bool triggered = false;
@@ -118,13 +118,11 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
}
}
spin_unlock(&notif_wait->notif_wait_lock);
-
}
- if (triggered)
- wake_up_all(&notif_wait->notif_waitq);
+ return triggered;
}
-IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
+IWL_EXPORT_SYMBOL(iwl_notification_wait);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
index 0f9995ed71cd..368884be4e7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,10 +90,10 @@ struct iwl_notif_wait_data {
*
* This structure is not used directly, to wait for a
* notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
+ * iwl_init_notification_wait() with appropriate
* parameters. Then do whatever will cause the ucode
* to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
+ * call iwl_wait_notification().
*
* Each notification is one-shot. If at some point we
* need to support multi-shot notifications (which
@@ -114,10 +115,24 @@ struct iwl_notification_wait {
/* caller functions */
void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
- struct iwl_rx_packet *pkt);
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
+static inline void
+iwl_notification_notify(struct iwl_notif_wait_data *notif_data)
+{
+ wake_up_all(&notif_data->notif_waitq);
+}
+
+static inline void
+iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt)
+{
+ if (iwl_notification_wait(notif_data, pkt))
+ iwl_notification_notify(notif_data);
+}
+
/* user functions */
void __acquires(wait_entry)
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
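The refactor above splits the old combined helper into a check step and a wake step, with an inline wrapper keeping the original one-call behaviour. A minimal sketch of that check/notify split (function names below are stand-ins, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the scan over the notification wait list: returns true
 * when some waiter matched the received packet. */
static bool notif_check(int pkt_id, int wanted_id)
{
        return pkt_id == wanted_id;
}

/* Stand-in for wake_up_all(): only called when a waiter was triggered. */
static void notif_wake(void)
{
        printf("waking sleepers\n");
}

/* Composition that preserves the old single-call interface on top of the split. */
static inline void notif_check_and_wake(int pkt_id, int wanted_id)
{
        if (notif_check(pkt_id, wanted_id))
                notif_wake();
}

int main(void)
{
        notif_check_and_wake(7, 7);   /* match: triggers the wake-up */
        notif_check_and_wake(3, 7);   /* no match: no wake-up */
        return 0;
}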
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 406ef301b8ab..da8234b762bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -369,6 +369,7 @@
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
#define DBGC_IN_SAMPLE (0xa03c00)
+#define DBGC_OUT_CTRL (0xa03c0c)
/* enable the ID buf for read */
#define WFPM_PS_CTL_CLR 0xA0300C
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 700d244df34b..2642d8e477b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -914,14 +914,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
return 0;
}
-static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
-{
- if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
- iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
- else
- iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
-}
-
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
{
u8 *ptr;
@@ -935,10 +927,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
/* EARLY START - firmware's configuration is hard coded */
if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
!mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
- conf_id == FW_DBG_START_FROM_ALIVE) {
- iwl_mvm_restart_early_start(mvm);
+ conf_id == FW_DBG_START_FROM_ALIVE)
return 0;
- }
if (!mvm->fw->dbg_conf_tlv[conf_id])
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index c60703e0c246..2b1c691bb4b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1666,8 +1666,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
*/
static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
{
+ u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
+ IWL_MVM_CMD_QUEUE;
+
return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
- ~BIT(IWL_MVM_CMD_QUEUE));
+ ~BIT(cmd_queue));
}
static inline
@@ -1687,6 +1690,7 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
mvm->ucode_loaded = false;
+ mvm->fw_dbg_conf = FW_DBG_INVALID;
iwl_trans_stop_device(mvm->trans);
}
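The helper above builds a bitmap of every TX queue except the command queue, whose index differs between the DQA and legacy layouts. The bit arithmetic in isolation (queue counts and indices here are illustrative, not the hardware's):

#include <stdint.h>
#include <stdio.h>

/* Bitmap of flushable queues: every queue bit set except the command queue. */
static uint32_t flushable_queues(unsigned int num_queues, unsigned int cmd_queue)
{
        uint32_t all = (num_queues >= 32) ? 0xffffffffu
                                          : ((1u << num_queues) - 1);
        return all & ~(1u << cmd_queue);
}

int main(void)
{
        /* e.g. 31 queues with the command queue at index 9 (a DQA-style layout) */
        printf("0x%08x\n", flushable_queues(31, 9));
        return 0;
}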
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 4d35deb628bc..6d38eec3f9d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1118,21 +1118,37 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
mutex_lock(&mvm->mutex);
- /* stop recording */
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ /* stop recording */
iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+
+ iwl_mvm_fw_error_dump(mvm);
+
+ /* start recording again if the firmware has not crashed */
+ if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+ mvm->fw->dbg_dest_tlv)
+ iwl_clear_bits_prph(mvm->trans,
+ MON_BUFF_SAMPLE_CTL, 0x100);
} else {
+ u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
+ u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
+
+ /* stop recording */
iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
- /* wait before we collect the data till the DBGC stop */
udelay(100);
- }
+ iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+ /* wait until the DBGC stops before we collect the data */
+ udelay(500);
- iwl_mvm_fw_error_dump(mvm);
+ iwl_mvm_fw_error_dump(mvm);
- /* start recording again if the firmware is not crashed */
- WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
- mvm->fw->dbg_dest_tlv &&
- iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
+ /* start recording again if the firmware has not crashed */
+ if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+ mvm->fw->dbg_dest_tlv) {
+ iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
+ iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
+ }
+ }
mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 227c5ed9cbe6..0aea476ebf50 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1867,12 +1867,10 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
struct rs_rate *rate = &search_tbl->rate;
const struct rs_tx_column *column = &rs_tx_columns[col_id];
const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
unsigned long rate_mask = 0;
u32 rate_idx = 0;
- memcpy(search_tbl, tbl, sz);
+ memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win));
rate->sgi = column->sgi;
rate->ant = column->ant;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 0e60e38b2acf..b78e60eb600f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -104,7 +104,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb)
{
- unsigned int hdrlen, fraglen;
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ unsigned int fraglen;
+
+ /*
+ * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
+ * but those are all multiples of 4 long) all goes away, but we
+ * want the *end* of it, which is going to be the start of the IP
+ * header, to be aligned when it gets pulled in.
+ * The beginning of the skb->data is aligned on at least a 4-byte
+ * boundary after allocation. Everything here is aligned at least
+ * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
+ * the result.
+ */
+ skb_reserve(skb, hdrlen & 3);
/* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr (including crypto if present, and
@@ -118,8 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
* If the latter changes (there are efforts in the standards group
* to do so) we should revisit this and ieee80211_data_to_8023().
*/
- hdrlen = (len <= skb_tailroom(skb)) ? len :
- sizeof(*hdr) + crypt_len + 8;
+ hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
fraglen = len - hdrlen;
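The reserve added above works because 802.11 header lengths are always even: padding by (hdrlen & 3) makes reserve + hdrlen a multiple of four, so the IP header that follows lands 4-byte aligned. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        /* Typical 802.11 MAC header lengths: data, QoS data, 4-address, etc. */
        unsigned int hdrlens[] = { 24, 26, 30, 32 };

        for (unsigned int i = 0; i < sizeof(hdrlens) / sizeof(hdrlens[0]); i++) {
                unsigned int hdrlen = hdrlens[i];
                unsigned int reserve = hdrlen & 3;  /* skb_reserve() amount */

                /* With the buffer start 4-byte aligned, the payload begins at
                 * reserve + hdrlen, which must be a multiple of 4. */
                printf("hdrlen=%u reserve=%u payload offset=%u aligned=%s\n",
                       hdrlen, reserve, reserve + hdrlen,
                       ((reserve + hdrlen) % 4 == 0) ? "yes" : "no");
        }
        return 0;
}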
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index bec7d9c46087..c5203568a47a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
int ret;
- if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
- return -EIO;
-
mutex_lock(&mvm->mutex);
+ if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+ ret = -EIO;
+ goto unlock;
+ }
+
if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
ret = -EINVAL;
goto unlock;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 10ef44e8ecd5..fe32de252e6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2824,7 +2824,8 @@ static struct iwl_trans_dump_data
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
- if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+ if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+ (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
return iwl_pci_fw_enter_d0i3(trans);
return 0;
@@ -2832,7 +2833,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
static void iwl_trans_pcie_resume(struct iwl_trans *trans)
{
- if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+ if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+ (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
iwl_pci_fw_exit_d0i3(trans);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4b462dc21c41..2681b5339810 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -552,8 +552,6 @@ struct mac80211_hwsim_data {
/* wmediumd portid responsible for netgroup of this radio */
u32 wmediumd;
- int power_level;
-
/* difference between this hw's clock and the real clock, in usecs */
s64 tsf_offset;
s64 bcn_delta;
@@ -730,16 +728,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
val != PS_MANUAL_POLL)
return -EINVAL;
- old_ps = data->ps;
- data->ps = val;
-
- local_bh_disable();
if (val == PS_MANUAL_POLL) {
+ if (data->ps != PS_ENABLED)
+ return -EINVAL;
+ local_bh_disable();
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_ps_poll, data);
- data->ps_poll_pending = true;
- } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+ local_bh_enable();
+ return 0;
+ }
+ old_ps = data->ps;
+ data->ps = val;
+
+ local_bh_disable();
+ if (old_ps == PS_DISABLED && val != PS_DISABLED) {
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_nullfunc_ps, data);
@@ -1208,7 +1211,9 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
rx_status.flag |= RX_FLAG_SHORT_GI;
/* TODO: simulate real signal strength (and optional packet loss) */
- rx_status.signal = data->power_level - 50;
+ rx_status.signal = -50;
+ if (info->control.vif)
+ rx_status.signal += info->control.vif->bss_conf.txpower;
if (data->ps != PS_DISABLED)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
@@ -1607,7 +1612,6 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
WARN_ON(data->channel && data->use_chanctx);
- data->power_level = conf->power_level;
if (!data->started || !data->beacon_int)
tasklet_hrtimer_cancel(&data->beacon_timer);
else if (!hrtimer_is_queued(&data->beacon_timer.timer)) {
@@ -2212,7 +2216,6 @@ static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
"d_tx_failed",
"d_ps_mode",
"d_group",
- "d_tx_power",
};
#define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats)
@@ -2249,7 +2252,6 @@ static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw,
data[i++] = ar->tx_failed;
data[i++] = ar->ps;
data[i++] = ar->group;
- data[i++] = ar->power_level;
WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN);
}
@@ -3154,7 +3156,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
continue;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb) {
res = -ENOMEM;
goto out_err;
@@ -3344,8 +3346,11 @@ static void __net_exit hwsim_exit_net(struct net *net)
continue;
list_del(&data->list);
- INIT_WORK(&data->destroy_work, destroy_radio);
- schedule_work(&data->destroy_work);
+ spin_unlock_bh(&hwsim_radio_lock);
+ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+ NULL);
+ spin_lock_bh(&hwsim_radio_lock);
+
}
spin_unlock_bh(&hwsim_radio_lock);
}
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index c3a53cd6988e..7b4955cc38db 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1181,6 +1181,10 @@ static int if_spi_probe(struct spi_device *spi)
/* Initialize interrupt handling stuff. */
card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0);
+ if (!card->workqueue) {
+ err = -ENOMEM;
+ goto remove_card;
+ }
INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
INIT_WORK(&card->resume_work, if_spi_resume_worker);
@@ -1209,6 +1213,7 @@ release_irq:
free_irq(spi->irq, card);
terminate_workqueue:
destroy_workqueue(card->workqueue);
+remove_card:
lbs_remove_card(priv); /* will call free_netdev */
free_card:
free_if_spi_card(card);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 8677a53ef725..48d51be11f9b 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1109,6 +1109,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype;
+ if (priv->scan_request) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "change virtual interface: scan in process\n");
+ return -EBUSY;
+ }
+
switch (curr_iftype) {
case NL80211_IFTYPE_ADHOC:
switch (type) {
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 2478ccd6f2d9..1e36f11831a9 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -146,7 +146,6 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
kfree(adapter->regd);
- vfree(adapter->chan_stats);
kfree(adapter);
return 0;
}
@@ -636,6 +635,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
err_add_intf:
+ vfree(adapter->chan_stats);
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
err_init_fw:
@@ -1429,6 +1429,7 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem)
mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
rtnl_unlock();
}
+ vfree(adapter->chan_stats);
up(sem);
exit_sem_err:
@@ -1729,6 +1730,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
rtnl_unlock();
}
+ vfree(adapter->chan_stats);
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 8d601dcf2948..486b8c75cd1f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -1458,7 +1458,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
}
if (card->mpa_rx.pkt_cnt == 1)
- mport = adapter->ioport + port;
+ mport = adapter->ioport + card->mpa_rx.start_port;
if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
card->mpa_rx.buf_len, mport, 1))
@@ -1891,7 +1891,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
}
if (card->mpa_tx.pkt_cnt == 1)
- mport = adapter->ioport + port;
+ mport = adapter->ioport + card->mpa_tx.start_port;
ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
card->mpa_tx.buf_len, mport);
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
index dbdfb3f5c507..a9f5f398b2f8 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mcu.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -66,8 +66,10 @@ mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len)
WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
- skb_reserve(skb, MT_DMA_HDR_LEN);
- memcpy(skb_put(skb, len), data, len);
+ if (skb) {
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ memcpy(skb_put(skb, len), data, len);
+ }
return skb;
}
@@ -170,6 +172,8 @@ static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
};
skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
}
@@ -205,6 +209,8 @@ mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
};
skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
}
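The pattern added above, an allocator that may return NULL with every caller turning that into -ENOMEM, shown as a minimal standalone sketch (names and the plain malloc are illustrative stand-ins for the skb helpers):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* May fail; callers must check for NULL rather than dereferencing blindly. */
static char *msg_alloc(const char *data, size_t len)
{
        char *buf = malloc(len);

        if (buf)
                memcpy(buf, data, len);
        return buf;
}

static int msg_send(const char *data, size_t len)
{
        char *buf = msg_alloc(data, len);

        if (!buf)
                return -ENOMEM;   /* propagate the failure instead of crashing */

        /* ... hand the buffer to the transport here ... */
        free(buf);
        return 0;
}

int main(void)
{
        printf("send: %d\n", msg_send("hello", 5));
        return 0;
}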
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 13da95a24cf7..987c7c4f43cd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -142,15 +142,25 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
if (!rt2x00dev->ops->hw->set_rts_threshold &&
(tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
IEEE80211_TX_RC_USE_CTS_PROTECT))) {
- if (rt2x00queue_available(queue) <= 1)
- goto exit_fail;
+ if (rt2x00queue_available(queue) <= 1) {
+ /*
+ * Recheck for full queue under lock to avoid race
+ * conditions with rt2x00lib_txdone().
+ */
+ spin_lock(&queue->tx_lock);
+ if (rt2x00queue_threshold(queue))
+ rt2x00queue_pause_queue(queue);
+ spin_unlock(&queue->tx_lock);
+
+ goto exit_free_skb;
+ }
if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
- goto exit_fail;
+ goto exit_free_skb;
}
if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
- goto exit_fail;
+ goto exit_free_skb;
/*
* Pausing queue has to be serialized with rt2x00lib_txdone(). Note
@@ -164,10 +174,6 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
return;
- exit_fail:
- spin_lock(&queue->tx_lock);
- rt2x00queue_pause_queue(queue);
- spin_unlock(&queue->tx_lock);
exit_free_skb:
ieee80211_free_txskb(hw, skb);
}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0881ba8535f4..c78abfc7bd96 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -247,7 +247,10 @@ static const UCHAR b4_default_startup_parms[] = {
0x04, 0x08, /* Noise gain, limit offset */
0x28, 0x28, /* det rssi, med busy offsets */
7, /* det sync thresh */
- 0, 2, 2 /* test mode, min, max */
+ 0, 2, 2, /* test mode, min, max */
+ 0, /* rx/tx delay */
+ 0, 0, 0, 0, 0, 0, /* current BSS id */
+ 0 /* hop set */
};
/*===========================================================================*/
@@ -598,7 +601,7 @@ static void init_startup_params(ray_dev_t *local)
* a_beacon_period = hops a_beacon_period = KuS
*//* 64ms = 010000 */
if (local->fw_ver == 0x55) {
- memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms,
+ memcpy(&local->sparm.b4, b4_default_startup_parms,
sizeof(struct b4_startup_params));
/* Translate sane kus input values to old build 4/5 format */
/* i = hop time in uS truncated to 3 bytes */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 231f84db9ab0..6113624ccec3 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1454,6 +1454,7 @@ static int rtl8187_probe(struct usb_interface *intf,
goto err_free_dev;
}
mutex_init(&priv->io_mutex);
+ mutex_init(&priv->conf_mutex);
SET_IEEE80211_DEV(dev, &intf->dev);
usb_set_intfdata(intf, dev);
@@ -1627,7 +1628,6 @@ static int rtl8187_probe(struct usb_interface *intf,
printk(KERN_ERR "rtl8187: Cannot register device\n");
goto err_free_dmabuf;
}
- mutex_init(&priv->conf_mutex);
skb_queue_head_init(&priv->b_tx_status.queue);
wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 75ffeaa54ed8..e15b462d096b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -1572,7 +1572,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
dev_kfree_skb_irq(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
+
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpci->tx_ring[i].cur_tx_rp = 0;
+ rtlpci->tx_ring[i].cur_tx_wp = 0;
+ }
+
ring->idx = 0;
+ ring->entries = rtlpci->txringcount[i];
}
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index aba60c3145c5..618e509e75d6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -1125,7 +1125,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
/* Configuration Space offset 0x70f BIT7 is used to control L0S */
tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f);
- _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7));
+ _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) |
+ ASPM_L1_LATENCY << 3);
/* Configuration Space offset 0x719 Bit3 is for L1
* BIT4 is for clock request
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 603c90470225..15b2350d9f45 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -3427,6 +3427,10 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
/* because rndis_command() sleeps we need to use workqueue */
priv->workqueue = create_singlethread_workqueue("rndis_wlan");
+ if (!priv->workqueue) {
+ wiphy_free(wiphy);
+ return -ENOMEM;
+ }
INIT_WORK(&priv->work, rndis_wlan_worker);
INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller);
INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 1c539c83e8cf..5e41bf04ef61 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
- wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+ ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index c5effd6c6be9..01ca1d57b3d9 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1278,6 +1278,9 @@ static int eject_installer(struct usb_interface *intf)
u8 bulk_out_ep;
int r;
+ if (iface_desc->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
/* Find bulk out endpoint */
for (r = 1; r >= 0; r--) {
endpoint = &iface_desc->endpoint[r].desc;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1a9dadf7b3cc..1b287861e34f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1345,6 +1345,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
+ xenbus_switch_state(dev, XenbusStateInitialising);
return netdev;
exit:
@@ -2037,7 +2038,10 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
+ break;
+
case XenbusStateUnknown:
+ wake_up_all(&module_unload_q);
break;
case XenbusStateInitWait:
@@ -2168,7 +2172,9 @@ static int xennet_remove(struct xenbus_device *dev)
xenbus_switch_state(dev, XenbusStateClosing);
wait_event(module_unload_q,
xenbus_read_driver_state(dev->otherend) ==
- XenbusStateClosing);
+ XenbusStateClosing ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateUnknown);
xenbus_switch_state(dev, XenbusStateClosed);
wait_event(module_unload_q,
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index af62c4c854f3..b4f31dad40d6 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -17,7 +17,7 @@
*/
#include <linux/module.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
#include <linux/firmware.h>
#include <linux/nfc.h>
#include <net/nfc/nci.h>
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index a7faa0bcc01e..fc8e78a29d77 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -96,10 +96,9 @@ static int nfcmrvl_spi_nci_send(struct nfcmrvl_private *priv,
/* Send the SPI packet */
err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion,
skb);
- if (err != 0) {
+ if (err)
nfc_err(priv->dev, "spi_send failed %d", err);
- kfree_skb(skb);
- }
+
return err;
}
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 1dc89248e58e..11d78b43cf76 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -242,10 +242,10 @@ static int pn533_i2c_remove(struct i2c_client *client)
dev_dbg(&client->dev, "%s\n", __func__);
- pn533_unregister_device(phy->priv);
-
free_irq(client->irq, phy);
+ pn533_unregister_device(phy->priv);
+
return 0;
}
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 9faaa9694d87..77db9795510f 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -286,8 +286,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
- set_capacity(disk, 0);
- device_add_disk(dev, disk);
if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
return -ENOMEM;
@@ -300,6 +298,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
}
set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
+ device_add_disk(dev, disk);
revalidate_disk(disk);
return 0;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 7121453ec047..0c46ada027cf 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1392,8 +1392,6 @@ static int btt_blk_init(struct btt *btt)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
btt->btt_queue->queuedata = btt;
- set_capacity(btt->btt_disk, 0);
- device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
if (btt_meta_size(btt)) {
int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
@@ -1405,6 +1403,7 @@ static int btt_blk_init(struct btt *btt)
}
}
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
+ device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
revalidate_disk(btt->btt_disk);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 0392eb8a0dea..8311a93cabd8 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -812,16 +812,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
- size_t buf_len = 0, in_len = 0, out_len = 0;
static char out_env[ND_CMD_MAX_ENVELOPE];
static char in_env[ND_CMD_MAX_ENVELOPE];
const struct nd_cmd_desc *desc = NULL;
unsigned int cmd = _IOC_NR(ioctl_cmd);
void __user *p = (void __user *) arg;
struct device *dev = &nvdimm_bus->dev;
- struct nd_cmd_pkg pkg;
const char *cmd_name, *dimm_name;
+ u32 in_len = 0, out_len = 0;
unsigned long cmd_mask;
+ struct nd_cmd_pkg pkg;
+ u64 buf_len = 0;
void *buf;
int rc, i;
@@ -882,7 +883,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
if (cmd == ND_CMD_CALL) {
- dev_dbg(dev, "%s:%s, idx: %llu, in: %zu, out: %zu, len %zu\n",
+ dev_dbg(dev, "%s:%s, idx: %llu, in: %u, out: %u, len %llu\n",
__func__, dimm_name, pkg.nd_command,
in_len, out_len, buf_len);
@@ -912,9 +913,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
out_len += out_size;
}
- buf_len = out_len + in_len;
+ buf_len = (u64) out_len + (u64) in_len;
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
- dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
+ dev_dbg(dev, "%s:%s cmd: %s buf_len: %llu > %d\n", __func__,
dimm_name, cmd_name, buf_len,
ND_IOCTL_MAX_BUFLEN);
return -EINVAL;
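The buf_len change above widens the sum of the two 32-bit envelope lengths to u64 so it cannot wrap before the ND_IOCTL_MAX_BUFLEN comparison (size_t is only 32 bits on 32-bit builds). A small demonstration of the difference:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t in_len = 0xffff0000u;   /* two large envelope sizes */
        uint32_t out_len = 0x00020000u;

        /* 32-bit sum wraps around... */
        uint32_t narrow = in_len + out_len;
        /* ...while widening each operand first keeps the true total. */
        uint64_t wide = (uint64_t)in_len + (uint64_t)out_len;

        printf("32-bit sum: 0x%08" PRIx32 "\n", narrow);
        printf("64-bit sum: 0x%09" PRIx64 "\n", wide);

        /* Only the wide sum can be compared against a size limit safely. */
        return 0;
}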
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index b8fb1ef1fc15..74257ac92490 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1747,7 +1747,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
}
if (i < nd_region->ndr_mappings) {
- struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
+ struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
/*
* Give up if we don't find an instance of a uuid at each
@@ -1755,7 +1755,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
* find a dimm with two instances of the same uuid.
*/
dev_err(&nd_region->dev, "%s missing label for %pUb\n",
- dev_name(ndd->dev), nd_label->uuid);
+ nvdimm_name(nvdimm), nd_label->uuid);
rc = -EINVAL;
goto err;
}
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 42abdd2391c9..d6aa59ca68b9 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -563,6 +563,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
return altmap;
}
+static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
+{
+ return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
+ ALIGN_DOWN(phys, nd_pfn->align));
+}
+
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
@@ -618,13 +624,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
start = nsio->res.start;
size = PHYS_SECTION_ALIGN_UP(start + size) - start;
if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED) {
+ IORES_DESC_NONE) == REGION_MIXED
+ || !IS_ALIGNED(start + resource_size(&nsio->res),
+ nd_pfn->align)) {
size = resource_size(&nsio->res);
- end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+ end_trunc = start + size - phys_pmem_align_down(nd_pfn,
+ start + size);
}
if (start_pad + end_trunc)
- dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+ dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
dev_name(&ndns->dev), start_pad + end_trunc);
/*
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 719ee5fb2626..c823e9346389 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1204,7 +1204,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
- if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+ if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+ is_power_of_2(ctrl->max_hw_sectors))
blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
blk_queue_virt_boundary(q, ctrl->page_size - 1);
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2039,6 +2040,10 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
struct nvme_ns *ns;
mutex_lock(&ctrl->namespaces_mutex);
+
+ /* Forcibly start all queues to avoid having stuck requests */
+ blk_mq_start_hw_queues(ctrl->admin_q);
+
list_for_each_entry(ns, &ctrl->namespaces, list) {
/*
* Revalidating a dead namespace sets capacity to 0. This will
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e48ecb9303ca..8cc856ecec95 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1263,7 +1263,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
/* If there is a reset ongoing, we shouldn't reset again. */
- if (work_busy(&dev->reset_work))
+ if (dev->ctrl.state == NVME_CTRL_RESETTING)
return false;
/* We shouldn't reset unless the controller is on fatal error state
@@ -1755,7 +1755,7 @@ static void nvme_reset_work(struct work_struct *work)
struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
int result = -ENODEV;
- if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
+ if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
goto out;
/*
@@ -1765,9 +1765,6 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
- goto out;
-
result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -1841,8 +1838,8 @@ static int nvme_reset(struct nvme_dev *dev)
{
if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
return -ENODEV;
- if (work_busy(&dev->reset_work))
- return -ENODEV;
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+ return -EBUSY;
if (!queue_work(nvme_workq, &dev->reset_work))
return -EBUSY;
return 0;
@@ -1944,6 +1941,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto release_pools;
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
queue_work(nvme_workq, &dev->reset_work);
@@ -1987,6 +1985,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&dev->reset_work);
pci_set_drvdata(pdev, NULL);
if (!pci_device_is_present(pdev)) {
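The reset rework above gates resets on winning the state transition to NVME_CTRL_RESETTING instead of polling work_busy(), so concurrent requests cannot queue the reset work twice. A compact sketch of that guard using C11 atomics (the state names and single-variable model are illustrative, not the driver's state machine):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative controller states, not the driver's enum. */
enum ctrl_state { CTRL_LIVE, CTRL_RESETTING, CTRL_DELETING };

static _Atomic int ctrl_state = CTRL_LIVE;

/* Only the caller that moves LIVE -> RESETTING may schedule the reset work. */
static bool try_start_reset(void)
{
        int expected = CTRL_LIVE;

        return atomic_compare_exchange_strong(&ctrl_state, &expected,
                                               CTRL_RESETTING);
}

int main(void)
{
        printf("first reset request:  %s\n", try_start_reset() ? "queued" : "busy");
        printf("second reset request: %s\n", try_start_reset() ? "queued" : "busy");
        return 0;
}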
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a0bccb54a9bd..466b285cef3e 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2109,7 +2109,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
continue;
/* Allocate an alias_prop with enough space for the stem */
- ap = dt_alloc(sizeof(*ap) + len + 1, 4);
+ ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
if (!ap)
continue;
memset(ap, 0, sizeof(*ap) + len + 1);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index f7a970120055..3cda60c036f9 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -223,7 +223,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
str[i] = '_';
}
- return tsize;
+ return repend;
}
EXPORT_SYMBOL_GPL(of_device_get_modalias);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 502f5547a1f2..e9360d5cbcba 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -935,7 +935,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
int offset;
const char *p, *q, *options = NULL;
int l;
- const struct earlycon_id *match;
+ const struct earlycon_id **p_match;
const void *fdt = initial_boot_params;
offset = fdt_path_offset(fdt, "/chosen");
@@ -962,7 +962,10 @@ int __init early_init_dt_scan_chosen_stdout(void)
return 0;
}
- for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+ for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+ p_match++) {
+ const struct earlycon_id *match = *p_match;
+
if (!match->compatible[0])
continue;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 78530d1714dc..bdce0679674c 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2646,6 +2646,7 @@ enum parport_pc_pci_cards {
netmos_9901,
netmos_9865,
quatech_sppxp100,
+ wch_ch382l,
};
@@ -2708,6 +2709,7 @@ static struct parport_pc_pci {
/* netmos_9901 */ { 1, { { 0, -1 }, } },
/* netmos_9865 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
+ /* wch_ch382l */ { 1, { { 2, -1 }, } },
};
static const struct pci_device_id parport_pc_pci_tbl[] = {
@@ -2797,6 +2799,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
+ /* WCH CH382L PCI-E single parallel port card */
+ { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 4fce494271cc..11bad826683e 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -32,6 +32,7 @@
#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
+#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
#define PCIE_CORE_LINK_TRAINING BIT(5)
@@ -175,8 +176,6 @@
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb
-/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
-#define PCIE_BDF(dev) (dev << 4)
#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
@@ -298,7 +297,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
(7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
- PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
+ (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
+ PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
/* Program PCIe Control 2 to disable strict ordering */
@@ -439,7 +439,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
u32 reg;
int ret;
- if (PCI_SLOT(devfn) != 0) {
+ if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -458,7 +458,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
advk_writel(pcie, reg, PIO_CTRL);
/* Program the address registers */
- reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
+ reg = PCIE_CONF_ADDR(bus->number, devfn, where);
advk_writel(pcie, reg, PIO_ADDR_LS);
advk_writel(pcie, 0, PIO_ADDR_MS);
@@ -493,7 +493,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int offset;
int ret;
- if (PCI_SLOT(devfn) != 0)
+ if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
if (where % size)
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index dafb4cdb2b7f..d392a55ec0a9 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -351,6 +351,7 @@ enum hv_pcibus_state {
hv_pcibus_init = 0,
hv_pcibus_probed,
hv_pcibus_installed,
+ hv_pcibus_removed,
hv_pcibus_maximum
};
@@ -1205,9 +1206,11 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
hbus->pci_bus->msi = &hbus->msi_chip;
hbus->pci_bus->msi->dev = &hbus->hdev->device;
+ pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
pci_bus_assign_resources(hbus->pci_bus);
pci_bus_add_devices(hbus->pci_bus);
+ pci_unlock_rescan_remove();
hbus->state = hv_pcibus_installed;
return 0;
}
@@ -1489,13 +1492,24 @@ static void pci_devices_present_work(struct work_struct *work)
put_pcichild(hpdev, hv_pcidev_ref_initial);
}
- /* Tell the core to rescan bus because there may have been changes. */
- if (hbus->state == hv_pcibus_installed) {
+ switch(hbus->state) {
+ case hv_pcibus_installed:
+ /*
+ * Tell the core to rescan bus
+ * because there may have been changes.
+ */
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
pci_unlock_rescan_remove();
- } else {
+ break;
+
+ case hv_pcibus_init:
+ case hv_pcibus_probed:
survey_child_resources(hbus);
+ break;
+
+ default:
+ break;
}
up(&hbus->enum_sem);
@@ -1585,8 +1599,10 @@ static void hv_eject_device_work(struct work_struct *work)
pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
wslot);
if (pdev) {
+ pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
pci_dev_put(pdev);
+ pci_unlock_rescan_remove();
}
memset(&ctxt, 0, sizeof(ctxt));
@@ -2170,6 +2186,7 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
if (!hbus)
return -ENOMEM;
+ hbus->state = hv_pcibus_init;
/*
* The PCI bus "domain" is what is called "segment" in ACPI and
@@ -2312,6 +2329,7 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_stop_root_bus(hbus->pci_bus);
pci_remove_root_bus(hbus->pci_bus);
pci_unlock_rescan_remove();
+ hbus->state = hv_pcibus_removed;
}
ret = hv_send_resources_released(hdev);
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index af8f6e92e885..b3a87159e457 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -861,7 +861,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* setup bus numbers */
val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
val &= 0xff000000;
- val |= 0x00010100;
+ val |= 0x00ff0100;
dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val);
/* setup command register */
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a46b585fae31..d44b55879c67 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -587,6 +587,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
{
unsigned long long sta = 0;
struct acpiphp_func *func;
+ u32 dvid;
list_for_each_entry(func, &slot->funcs, sibling) {
if (func->flags & FUNC_HAS_STA) {
@@ -597,19 +598,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
if (ACPI_SUCCESS(status) && sta)
break;
} else {
- u32 dvid;
-
- pci_bus_read_config_dword(slot->bus,
- PCI_DEVFN(slot->device,
- func->function),
- PCI_VENDOR_ID, &dvid);
- if (dvid != 0xffffffff) {
+ if (pci_bus_read_dev_vendor_id(slot->bus,
+ PCI_DEVFN(slot->device, func->function),
+ &dvid, 0)) {
sta = ACPI_STA_ALL;
break;
}
}
}
+ if (!sta) {
+ /*
+ * Check for the slot itself since the ACPI slot may be a
+ * device below a PCIe upstream port, in which case it may
+ * not even be reachable yet.
+ */
+ if (pci_bus_read_dev_vendor_id(slot->bus,
+ PCI_DEVFN(slot->device, 0), &dvid, 0)) {
+ sta = ACPI_STA_ALL;
+ }
+ }
+
return (unsigned int)sta;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 802997e2ddcc..d81ad841dc0c 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -463,8 +463,6 @@ static void pci_device_shutdown(struct device *dev)
if (drv && drv->shutdown)
drv->shutdown(pci_dev);
- pci_msi_shutdown(pci_dev);
- pci_msix_shutdown(pci_dev);
/*
* If this is a kexec reboot, turn off Bus Master bit on the
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a87c8e1aef68..9c13aeeeb973 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3756,27 +3756,49 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
-/*
- * We should only need to wait 100ms after FLR, but some devices take longer.
- * Wait for up to 1000ms for config space to return something other than -1.
- * Intel IGD requires this when an LCD panel is attached. We read the 2nd
- * dword because VFs don't implement the 1st dword.
- */
static void pci_flr_wait(struct pci_dev *dev)
{
- int i = 0;
+ int delay = 1, timeout = 60000;
u32 id;
- do {
- msleep(100);
+ /*
+ * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
+ * 100ms, but may silently discard requests while the FLR is in
+ * progress. Wait 100ms before trying to access the device.
+ */
+ msleep(100);
+
+ /*
+ * After 100ms, the device should not silently discard config
+ * requests, but it may still indicate that it needs more time by
+ * responding to them with CRS completions. The Root Port will
+ * generally synthesize ~0 data to complete the read (except when
+ * CRS SV is enabled and the read was for the Vendor ID; in that
+ * case it synthesizes 0x0001 data).
+ *
+ * Wait for the device to return a non-CRS completion. Read the
+ * Command register instead of Vendor ID so we don't have to
+ * contend with the CRS SV value.
+ */
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ while (id == ~0) {
+ if (delay > timeout) {
+ dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n",
+ 100 + delay - 1);
+ return;
+ }
+
+ if (delay > 1000)
+ dev_info(&dev->dev, "not ready %dms after FLR; waiting\n",
+ 100 + delay - 1);
+
+ msleep(delay);
+ delay *= 2;
pci_read_config_dword(dev, PCI_COMMAND, &id);
- } while (i++ < 10 && id == ~0);
+ }
- if (id == ~0)
- dev_warn(&dev->dev, "Failed to return from FLR\n");
- else if (i > 1)
- dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
- (i - 1) * 100);
+ if (delay > 1000)
+ dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1);
}
static int pcie_flr(struct pci_dev *dev, int probe)
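The rewritten wait loop above replaces ten fixed 100ms polls with a 100ms grace period followed by exponential backoff up to a 60s budget. The timing skeleton of that loop, with the config-space read stubbed out and sleeps modelled as elapsed time:

#include <stdbool.h>
#include <stdio.h>

/* Stub: pretend the device starts answering config reads after ~3200ms. */
static bool device_ready(int elapsed_ms)
{
        return elapsed_ms >= 3200;
}

static void flr_wait(void)
{
        int delay = 1, timeout = 60000, elapsed = 100; /* initial 100ms sleep */

        while (!device_ready(elapsed)) {
                if (delay > timeout) {
                        printf("not ready %dms after FLR; giving up\n", elapsed);
                        return;
                }
                if (delay > 1000)
                        printf("not ready %dms after FLR; waiting\n", elapsed);

                elapsed += delay;       /* msleep(delay) in the real code */
                delay *= 2;             /* exponential backoff */
        }
        if (delay > 1000)
                printf("ready %dms after FLR\n", elapsed);
}

int main(void)
{
        flr_wait();
        return 0;
}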
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0916b126923..6643a7bc381c 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -526,10 +526,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
/*
* Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
- * hierarchies.
+ * hierarchies. Note that some PCIe host implementations omit
+ * the root ports entirely, in which case a downstream port on
+ * a switch may become the root of the link state chain for all
+ * its subordinate endpoints.
*/
if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
+ !pdev->bus->parent->self) {
link->root = link;
} else {
struct pcie_link_state *parent;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index a98be6db7e93..56340abe4fc6 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -231,7 +231,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= IORESOURCE_ROM_ENABLE;
l64 = l & PCI_ROM_ADDRESS_MASK;
sz64 = sz & PCI_ROM_ADDRESS_MASK;
- mask64 = (u32)PCI_ROM_ADDRESS_MASK;
+ mask64 = PCI_ROM_ADDRESS_MASK;
}
if (res->flags & IORESOURCE_MEM_64) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 98eba9127a0b..fb177dc576d6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3369,22 +3369,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
- pci_set_vpd_size(dev, 8192);
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
+ int chip = (dev->device & 0xf000) >> 12;
+ int func = (dev->device & 0x0f00) >> 8;
+ int prod = (dev->device & 0x00ff) >> 0;
+
+ /*
+ * If this is a T3-based adapter, there's a 1KB VPD area at offset
+ * 0xc00 which contains the preferred VPD values. If this is a T4 or
+ * later based adapter, the special VPD is at offset 0x400 for the
+ * Physical Functions (the SR-IOV Virtual Functions have no VPD
+ * Capabilities). The PCI VPD Access core routines will normally
+ * compute the size of the VPD by parsing the VPD Data Structure at
+ * offset 0x000. This will result in silent failures when attempting
+ * to access these other VPD areas which are beyond those computed
+ * limits.
+ */
+ if (chip == 0x0 && prod >= 0x20)
+ pci_set_vpd_size(dev, 8192);
+ else if (chip >= 0x4 && func < 0x8)
+ pci_set_vpd_size(dev, 2048);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ quirk_chelsio_extend_vpd);
#ifdef CONFIG_ACPI
/*
@@ -3870,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
+ quirk_dma_func1_alias);
/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
PCI_DEVICE_ID_JMICRON_JMB388_ESD,
@@ -4097,6 +4106,9 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
*/
acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
+ if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff)))
+ return -ENOTTY;
+
return acs_flags ? 0 : 1;
}
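[Editor's note] Returning to the Chelsio VPD quirk above, a quick worked example of the device-ID decode may help. The IDs below are hypothetical values chosen only to exercise each branch, not taken from a Chelsio datasheet:

/* Hedged illustration of the nibble decode in quirk_chelsio_extend_vpd(). */
#include <stdio.h>

static void decode(unsigned int id)
{
        int chip = (id & 0xf000) >> 12;
        int func = (id & 0x0f00) >> 8;
        int prod = (id & 0x00ff) >> 0;

        if (chip == 0x0 && prod >= 0x20)
                printf("0x%04x: T3-style part -> VPD size 8192\n", id);
        else if (chip >= 0x4 && func < 0x8)
                printf("0x%04x: T4+ physical function -> VPD size 2048\n", id);
        else
                printf("0x%04x: VPD size left as computed from the VPD header\n", id);
}

int main(void)
{
        decode(0x0030);  /* chip 0, prod 0x30      -> 8 KB   */
        decode(0x5001);  /* chip 5, func 0         -> 2 KB   */
        decode(0x5801);  /* chip 5, func 8 (VF-like) -> unchanged */
        return 0;
}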
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 4bc589ee78d0..85774b7a316a 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -63,7 +63,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
} else if (resno == PCI_ROM_RESOURCE) {
- mask = (u32)PCI_ROM_ADDRESS_MASK;
+ mask = PCI_ROM_ADDRESS_MASK;
} else {
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index b37b57294566..af82edc7fa5c 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -322,10 +322,16 @@ validate_group(struct perf_event *event)
return 0;
}
+static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
+{
+ struct platform_device *pdev = armpmu->plat_device;
+
+ return pdev ? dev_get_platdata(&pdev->dev) : NULL;
+}
+
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
- struct platform_device *plat_device;
struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
@@ -337,8 +343,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
* dereference.
*/
armpmu = *(void **)dev;
- plat_device = armpmu->plat_device;
- plat = dev_get_platdata(&plat_device->dev);
+
+ plat = armpmu_get_platdata(armpmu);
start_clock = sched_clock();
if (plat && plat->handle_irq)
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index fb38e208f32d..735d8f7f9d71 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -992,19 +992,16 @@ struct pinctrl_state *pinctrl_lookup_state(struct pinctrl *p,
EXPORT_SYMBOL_GPL(pinctrl_lookup_state);
/**
- * pinctrl_select_state() - select/activate/program a pinctrl state to HW
+ * pinctrl_commit_state() - select/activate/program a pinctrl state to HW
* @p: the pinctrl handle for the device that requests configuration
* @state: the state handle to select/activate/program
*/
-int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
+static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
struct pinctrl_setting *setting, *setting2;
struct pinctrl_state *old_state = p->state;
int ret;
- if (p->state == state)
- return 0;
-
if (p->state) {
/*
* For each pinmux setting in the old state, forget SW's record
@@ -1068,6 +1065,19 @@ unapply_new_state:
return ret;
}
+
+/**
+ * pinctrl_select_state() - select/activate/program a pinctrl state to HW
+ * @p: the pinctrl handle for the device that requests configuration
+ * @state: the state handle to select/activate/program
+ */
+int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
+{
+ if (p->state == state)
+ return 0;
+
+ return pinctrl_commit_state(p, state);
+}
EXPORT_SYMBOL_GPL(pinctrl_select_state);
static void devm_pinctrl_release(struct device *dev, void *res)
@@ -1236,7 +1246,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
int pinctrl_force_sleep(struct pinctrl_dev *pctldev)
{
if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep))
- return pinctrl_select_state(pctldev->p, pctldev->hog_sleep);
+ return pinctrl_commit_state(pctldev->p, pctldev->hog_sleep);
return 0;
}
EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
@@ -1248,7 +1258,7 @@ EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
int pinctrl_force_default(struct pinctrl_dev *pctldev)
{
if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default))
- return pinctrl_select_state(pctldev->p, pctldev->hog_default);
+ return pinctrl_commit_state(pctldev->p, pctldev->hog_default);
return 0;
}
EXPORT_SYMBOL_GPL(pinctrl_force_default);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 0a965026b134..fc5b18d3db20 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -46,6 +46,9 @@
#define BYT_TRIG_POS BIT(25)
#define BYT_TRIG_LVL BIT(24)
#define BYT_DEBOUNCE_EN BIT(20)
+#define BYT_GLITCH_FILTER_EN BIT(19)
+#define BYT_GLITCH_F_SLOW_CLK BIT(17)
+#define BYT_GLITCH_F_FAST_CLK BIT(16)
#define BYT_PULL_STR_SHIFT 9
#define BYT_PULL_STR_MASK (3 << BYT_PULL_STR_SHIFT)
#define BYT_PULL_STR_2K (0 << BYT_PULL_STR_SHIFT)
@@ -1579,6 +1582,9 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
*/
value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
BYT_TRIG_LVL);
+ /* Enable glitch filtering */
+ value |= BYT_GLITCH_FILTER_EN | BYT_GLITCH_F_SLOW_CLK |
+ BYT_GLITCH_F_FAST_CLK;
writel(value, reg);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index df63b7d997e8..b40a074822cf 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -368,18 +368,6 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
writel(value, padcfg0);
}
-static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
-{
- u32 value;
-
- /* Put the pad into GPIO mode */
- value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
- /* Disable SCI/SMI/NMI generation */
- value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
- value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
- writel(value, padcfg0);
-}
-
static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned pin)
@@ -387,6 +375,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
unsigned long flags;
+ u32 value;
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -396,7 +385,13 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
}
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
- intel_gpio_set_gpio_mode(padcfg0);
+ /* Put the pad into GPIO mode */
+ value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
+ /* Disable SCI/SMI/NMI generation */
+ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
+ value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
+ writel(value, padcfg0);
+
/* Disable TX buffer and enable RX (this will be input) */
__intel_gpio_set_direction(padcfg0, true);
@@ -775,8 +770,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- intel_gpio_set_gpio_mode(reg);
-
value = readl(reg);
value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 7511723c6b05..257c1c2e5888 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -138,7 +138,6 @@ static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = {
MESON_PIN(GPIOX_19, EE_OFF),
MESON_PIN(GPIOX_20, EE_OFF),
MESON_PIN(GPIOX_21, EE_OFF),
- MESON_PIN(GPIOX_22, EE_OFF),
MESON_PIN(GPIOCLK_0, EE_OFF),
MESON_PIN(GPIOCLK_1, EE_OFF),
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 49bf7dcb7ed8..f826793e972c 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1278,8 +1278,16 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct rockchip_pin_bank *bank = gpiochip_get_data(chip);
u32 data;
+ int ret;
+ ret = clk_enable(bank->clk);
+ if (ret < 0) {
+ dev_err(bank->drvdata->dev,
+ "failed to enable clock for bank %s\n", bank->name);
+ return ret;
+ }
data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
+ clk_disable(bank->clk);
return !(data & BIT(offset));
}
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 04053fe1e980..cfa3e850c49f 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -60,12 +60,14 @@ static int send_command(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
int ret;
+ int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
if (ec_dev->proto_version > 2)
- ret = ec_dev->pkt_xfer(ec_dev, msg);
+ xfer_fxn = ec_dev->pkt_xfer;
else
- ret = ec_dev->cmd_xfer(ec_dev, msg);
+ xfer_fxn = ec_dev->cmd_xfer;
+ ret = (*xfer_fxn)(ec_dev, msg);
if (msg->result == EC_RES_IN_PROGRESS) {
int i;
struct cros_ec_command *status_msg;
@@ -88,7 +90,7 @@ static int send_command(struct cros_ec_device *ec_dev,
for (i = 0; i < EC_COMMAND_RETRIES; i++) {
usleep_range(10000, 11000);
- ret = ec_dev->cmd_xfer(ec_dev, status_msg);
+ ret = (*xfer_fxn)(ec_dev, status_msg);
if (ret < 0)
break;
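[Editor's note] The cros_ec change above caches the transfer routine so that the EC_RES_IN_PROGRESS status polls travel over the same transport as the original command. A hedged, illustrative-only sketch of that pattern (demo_send() is not a real function in the driver):

/* Pick the transport once, then reuse the same pointer for retries. */
static int demo_send(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
        int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);

        xfer_fxn = (ec_dev->proto_version > 2) ? ec_dev->pkt_xfer
                                               : ec_dev->cmd_xfer;

        /* the initial command and any later status query share xfer_fxn */
        return xfer_fxn(ec_dev, msg);
}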
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index f3baf9973989..24f1630a8b3f 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -187,7 +187,7 @@ static ssize_t show_ec_version(struct device *dev,
count += scnprintf(buf + count, PAGE_SIZE - count,
"Build info: EC error %d\n", msg->result);
else {
- msg->data[sizeof(msg->data) - 1] = '\0';
+ msg->data[EC_HOST_PARAM_SIZE - 1] = '\0';
count += scnprintf(buf + count, PAGE_SIZE - count,
"Build info: %s\n", msg->data);
}
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 6eb2837f6b89..687cc5b922ee 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -120,6 +120,10 @@ static struct quirk_entry quirk_asus_x550lb = {
.xusb2pr = 0x01D9,
};
+static struct quirk_entry quirk_asus_ux330uak = {
+ .wmi_force_als_set = true,
+};
+
static int dmi_matched(const struct dmi_system_id *dmi)
{
quirks = dmi->driver_data;
@@ -152,6 +156,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X302UA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X302UA"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X401U",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -413,6 +426,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. UX330UAK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX330UAK"),
+ },
+ .driver_data = &quirk_asus_ux330uak,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X550LB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index 18716025b1db..c0983281fca4 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -145,8 +145,10 @@ static int asus_wireless_remove(struct acpi_device *adev)
{
struct asus_wireless_data *data = acpi_driver_data(adev);
- if (data->wq)
+ if (data->wq) {
+ devm_led_classdev_unregister(&adev->dev, &data->led);
destroy_workqueue(data->wq);
+ }
return 0;
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 8499d3ae4257..8a1bfd489c26 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1109,6 +1109,15 @@ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
}
/*
+ * Some devices don't support the get_als method or have a broken one,
+ * but still support the set method.
+ */
+static void asus_wmi_set_als(void)
+{
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_ALS_ENABLE, 1, NULL);
+}
+
+/*
* Hwmon device
*/
static int asus_hwmon_agfn_fan_speed_read(struct asus_wmi *asus, int fan,
@@ -2120,6 +2129,9 @@ static int asus_wmi_add(struct platform_device *pdev)
goto fail_rfkill;
}
+ if (asus->driver->quirks->wmi_force_als_set)
+ asus_wmi_set_als();
+
/* Some Asus desktop boards export an acpi-video backlight interface,
stop this from showing up */
chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index fdff626c3b51..5db052d1de1e 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -45,6 +45,7 @@ struct quirk_entry {
bool store_backlight_power;
bool wmi_backlight_power;
bool wmi_backlight_native;
+ bool wmi_force_als_set;
int wapf;
/*
* For machines with AMD graphic chips, it will send out WMI event
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 78080763df51..a74340dff530 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -37,6 +37,10 @@ static const struct acpi_device_id intel_vbtn_ids[] = {
static const struct key_entry intel_vbtn_keymap[] = {
{ KE_IGNORE, 0xC0, { KEY_POWER } }, /* power key press */
{ KE_KEY, 0xC1, { KEY_POWER } }, /* power key release */
+ { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* volume-up key press */
+ { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* volume-up key release */
+ { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* volume-down key press */
+ { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */
{ KE_END },
};
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 5cee9aa87aa3..48a11fd86a7f 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -3218,11 +3218,13 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
}
/* Enable backup battery charging */
- abx500_mask_and_set_register_interruptible(di->dev,
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_RTC, AB8500_RTC_CTRL_REG,
RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
- if (ret < 0)
+ if (ret < 0) {
dev_err(di->dev, "%s mask and set failed\n", __func__);
+ goto out;
+ }
if (is_ab8540(di->parent)) {
ret = abx500_mask_and_set_register_interruptible(di->dev,
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 73e2f0b79dd4..c4770a94cc8e 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1569,6 +1569,11 @@ static int bq2415x_probe(struct i2c_client *client,
acpi_id =
acpi_match_device(client->dev.driver->acpi_match_table,
&client->dev);
+ if (!acpi_id) {
+ dev_err(&client->dev, "failed to match device name\n");
+ ret = -ENODEV;
+ goto error_1;
+ }
name = kasprintf(GFP_KERNEL, "%s-%d", acpi_id->id, num);
}
if (!name) {
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 50171fd3cc6d..8ed2782d1bb1 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -506,6 +506,9 @@ static int bq24190_register_reset(struct bq24190_dev_info *bdi)
int ret, limit = 100;
u8 v;
+ if (device_property_read_bool(bdi->dev, "disable-reset"))
+ return 0;
+
/* Reset the registers */
ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
BQ24190_REG_POC_RESET_MASK,
@@ -1184,8 +1187,13 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
}
} while (f_reg && ++i < 2);
+ /* ignore over/under voltage fault after disconnect */
+ if (f_reg == (1 << BQ24190_REG_F_CHRG_FAULT_SHIFT) &&
+ !(ss_reg & BQ24190_REG_SS_PG_STAT_MASK))
+ f_reg = 0;
+
if (f_reg != bdi->f_reg) {
- dev_info(bdi->dev,
+ dev_warn(bdi->dev,
"Fault: boost %d, charge %d, battery %d, ntc %d\n",
!!(f_reg & BQ24190_REG_F_BOOST_FAULT_MASK),
!!(f_reg & BQ24190_REG_F_CHRG_FAULT_MASK),
diff --git a/drivers/power/supply/isp1704_charger.c b/drivers/power/supply/isp1704_charger.c
index 4cd6899b961e..95af5f305838 100644
--- a/drivers/power/supply/isp1704_charger.c
+++ b/drivers/power/supply/isp1704_charger.c
@@ -418,6 +418,10 @@ static int isp1704_charger_probe(struct platform_device *pdev)
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct isp1704_charger_data), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto fail0;
+ }
pdata->enable_gpio = gpio;
dev_info(&pdev->dev, "init gpio %d\n", pdata->enable_gpio);
diff --git a/drivers/power/supply/pda_power.c b/drivers/power/supply/pda_power.c
index dfe1ee89f7c7..922a86787c5c 100644
--- a/drivers/power/supply/pda_power.c
+++ b/drivers/power/supply/pda_power.c
@@ -30,9 +30,9 @@ static inline unsigned int get_irq_flags(struct resource *res)
static struct device *dev;
static struct pda_power_pdata *pdata;
static struct resource *ac_irq, *usb_irq;
-static struct timer_list charger_timer;
-static struct timer_list supply_timer;
-static struct timer_list polling_timer;
+static struct delayed_work charger_work;
+static struct delayed_work polling_work;
+static struct delayed_work supply_work;
static int polling;
static struct power_supply *pda_psy_ac, *pda_psy_usb;
@@ -140,7 +140,7 @@ static void update_charger(void)
}
}
-static void supply_timer_func(unsigned long unused)
+static void supply_work_func(struct work_struct *work)
{
if (ac_status == PDA_PSY_TO_CHANGE) {
ac_status = new_ac_status;
@@ -161,11 +161,12 @@ static void psy_changed(void)
* Okay, charger set. Now wait a bit before notifying supplicants,
* charge power should stabilize.
*/
- mod_timer(&supply_timer,
- jiffies + msecs_to_jiffies(pdata->wait_for_charger));
+ cancel_delayed_work(&supply_work);
+ schedule_delayed_work(&supply_work,
+ msecs_to_jiffies(pdata->wait_for_charger));
}
-static void charger_timer_func(unsigned long unused)
+static void charger_work_func(struct work_struct *work)
{
update_status();
psy_changed();
@@ -184,13 +185,14 @@ static irqreturn_t power_changed_isr(int irq, void *power_supply)
* Wait a bit before reading ac/usb line status and setting charger,
* because ac/usb status readings may lag from irq.
*/
- mod_timer(&charger_timer,
- jiffies + msecs_to_jiffies(pdata->wait_for_status));
+ cancel_delayed_work(&charger_work);
+ schedule_delayed_work(&charger_work,
+ msecs_to_jiffies(pdata->wait_for_status));
return IRQ_HANDLED;
}
-static void polling_timer_func(unsigned long unused)
+static void polling_work_func(struct work_struct *work)
{
int changed = 0;
@@ -211,8 +213,9 @@ static void polling_timer_func(unsigned long unused)
if (changed)
psy_changed();
- mod_timer(&polling_timer,
- jiffies + msecs_to_jiffies(pdata->polling_interval));
+ cancel_delayed_work(&polling_work);
+ schedule_delayed_work(&polling_work,
+ msecs_to_jiffies(pdata->polling_interval));
}
#if IS_ENABLED(CONFIG_USB_PHY)
@@ -250,8 +253,9 @@ static int otg_handle_notification(struct notifier_block *nb,
* Wait a bit before reading ac/usb line status and setting charger,
* because ac/usb status readings may lag from irq.
*/
- mod_timer(&charger_timer,
- jiffies + msecs_to_jiffies(pdata->wait_for_status));
+ cancel_delayed_work(&charger_work);
+ schedule_delayed_work(&charger_work,
+ msecs_to_jiffies(pdata->wait_for_status));
return NOTIFY_OK;
}
@@ -300,8 +304,8 @@ static int pda_power_probe(struct platform_device *pdev)
if (!pdata->ac_max_uA)
pdata->ac_max_uA = 500000;
- setup_timer(&charger_timer, charger_timer_func, 0);
- setup_timer(&supply_timer, supply_timer_func, 0);
+ INIT_DELAYED_WORK(&charger_work, charger_work_func);
+ INIT_DELAYED_WORK(&supply_work, supply_work_func);
ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac");
usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb");
@@ -385,9 +389,10 @@ static int pda_power_probe(struct platform_device *pdev)
if (polling) {
dev_dbg(dev, "will poll for status\n");
- setup_timer(&polling_timer, polling_timer_func, 0);
- mod_timer(&polling_timer,
- jiffies + msecs_to_jiffies(pdata->polling_interval));
+ INIT_DELAYED_WORK(&polling_work, polling_work_func);
+ cancel_delayed_work(&polling_work);
+ schedule_delayed_work(&polling_work,
+ msecs_to_jiffies(pdata->polling_interval));
}
if (ac_irq || usb_irq)
@@ -433,9 +438,9 @@ static int pda_power_remove(struct platform_device *pdev)
free_irq(ac_irq->start, pda_psy_ac);
if (polling)
- del_timer_sync(&polling_timer);
- del_timer_sync(&charger_timer);
- del_timer_sync(&supply_timer);
+ cancel_delayed_work_sync(&polling_work);
+ cancel_delayed_work_sync(&charger_work);
+ cancel_delayed_work_sync(&supply_work);
if (pdata->is_usb_online)
power_supply_unregister(pda_psy_usb);
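[Editor's note] The pda_power changes are the standard timer-to-delayed_work conversion. A minimal, self-contained sketch of that pattern, with made-up demo_* names, for reference:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;

static void demo_poll(struct work_struct *work)
{
        /* ... do the periodic work ... */

        /* re-arm: delayed_work takes the interval directly in jiffies,
         * no "jiffies +" base as with mod_timer() */
        schedule_delayed_work(&demo_work, msecs_to_jiffies(500));
}

static void demo_init(void)
{
        INIT_DELAYED_WORK(&demo_work, demo_poll);       /* replaces setup_timer() */
        schedule_delayed_work(&demo_work, msecs_to_jiffies(500));
}

static void demo_exit(void)
{
        cancel_delayed_work_sync(&demo_work);           /* replaces del_timer_sync() */
}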
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 14bde0db8c24..5b10b50f8686 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone(
power_zone->id = result;
idr_init(&power_zone->idr);
+ result = -ENOMEM;
power_zone->name = kstrdup(name, GFP_KERNEL);
if (!power_zone->name)
goto err_name_alloc;
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 86280b7e41f3..2aa5b37cc6d2 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -97,30 +97,26 @@ static s32 scaled_ppm_to_ppb(long ppm)
/* posix clock implementation */
-static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
+static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
tp->tv_sec = 0;
tp->tv_nsec = 1;
return 0;
}
-static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
+static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
- struct timespec64 ts = timespec_to_timespec64(*tp);
- return ptp->info->settime64(ptp->info, &ts);
+ return ptp->info->settime64(ptp->info, tp);
}
-static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp)
+static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
- struct timespec64 ts;
int err;
- err = ptp->info->gettime64(ptp->info, &ts);
- if (!err)
- *tp = timespec64_to_timespec(ts);
+ err = ptp->info->gettime64(ptp->info, tp);
return err;
}
@@ -133,7 +129,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
ops = ptp->info;
if (tx->modes & ADJ_SETOFFSET) {
- struct timespec ts;
+ struct timespec64 ts;
ktime_t kt;
s64 delta;
@@ -146,7 +142,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- kt = timespec_to_ktime(ts);
+ kt = timespec64_to_ktime(ts);
delta = ktime_to_ns(kt);
err = ops->adjtime(ops, delta);
} else if (tx->modes & ADJ_FREQUENCY) {
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 1c85ecc9e7ac..0fcf94ffad32 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -156,8 +156,12 @@ static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (div < 0)
return div;
- /* Let the core driver set pwm->period if disabled and duty_ns == 0 */
- if (!pwm_is_enabled(pwm) && !duty_ns)
+ /*
+ * Let the core driver set pwm->period if disabled and duty_ns == 0.
+ * But this driver must not set the new duty_ns if the current
+ * duty_cycle is not set.
+ */
+ if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle)
return 0;
rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index e464582a390a..3439f1e902cb 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -145,7 +145,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
break;
case 2:
- offset = STMPE24XX_PWMIC1;
+ offset = STMPE24XX_PWMIC2;
break;
default:
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index e4647840cd6e..7e8906d6ab7a 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -76,6 +76,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
unsigned long long c = duty_ns;
unsigned long rate, hz;
+ unsigned long long ns100 = NSEC_PER_SEC;
u32 val = 0;
int err;
@@ -95,9 +96,11 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* cycles at the PWM clock rate will take period_ns nanoseconds.
*/
rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH;
- hz = NSEC_PER_SEC / period_ns;
- rate = (rate + (hz / 2)) / hz;
+ /* Consider precision in PWM_SCALE_WIDTH rate calculation */
+ ns100 *= 100;
+ hz = DIV_ROUND_CLOSEST_ULL(ns100, period_ns);
+ rate = DIV_ROUND_CLOSEST(rate * 100, hz);
/*
* Since the actual PWM divider is the register's frequency divider
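[Editor's note] To see what the x100 scaling above buys, consider a slow PWM period where truncating the frequency to whole Hz is severe. A hedged numeric illustration with invented clock and period values (DIV_ROUND_CLOSEST is hand-expanded as add-half-then-divide); in the real driver a divider this large would be rejected by the scale-register bounds check, the point here is only the rounding behaviour:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t nsec_per_sec = 1000000000ULL;
        uint64_t rate = 48000000 >> 8;          /* 187500: clock after the duty-width shift */
        uint64_t period_ns = 600000000;         /* 0.6 s period, i.e. ~1.67 Hz */

        /* before: frequency truncated to whole Hz (1.67 -> 1) */
        uint64_t hz_old = nsec_per_sec / period_ns;
        uint64_t div_old = (rate + hz_old / 2) / hz_old;

        /* after: keep two extra digits by working in units of 0.01 Hz */
        uint64_t hz_new = (nsec_per_sec * 100 + period_ns / 2) / period_ns;
        uint64_t div_new = (rate * 100 + hz_new / 2) / hz_new;

        /* exact target is rate * period_ns / 1e9 = 112500 */
        printf("old divider %llu, new divider %llu\n",
               (unsigned long long)div_old, (unsigned long long)div_new);
        return 0;
}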
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 3a6d0290c54c..c5e272ea4372 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -296,6 +296,11 @@ static int anatop_regulator_probe(struct platform_device *pdev)
if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
sreg->sel = 22;
+ /* set the default voltage of the pcie phy to be 1.100v */
+ if (!sreg->sel && rdesc->name &&
+ !strcmp(rdesc->name, "vddpcie"))
+ sreg->sel = 0x10;
+
if (!sreg->bypass && !sreg->sel) {
dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
return -EINVAL;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 9403245503de..178fcda12cec 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2465,7 +2465,7 @@ static int _regulator_list_voltage(struct regulator *regulator,
ret = ops->list_voltage(rdev, selector);
if (lock)
mutex_unlock(&rdev->mutex);
- } else if (rdev->supply) {
+ } else if (rdev->is_switch && rdev->supply) {
ret = _regulator_list_voltage(rdev->supply, selector, lock);
} else {
return -EINVAL;
@@ -2523,7 +2523,7 @@ int regulator_count_voltages(struct regulator *regulator)
if (rdev->desc->n_voltages)
return rdev->desc->n_voltages;
- if (!rdev->supply)
+ if (!rdev->is_switch || !rdev->supply)
return -EINVAL;
return regulator_count_voltages(rdev->supply);
@@ -4049,6 +4049,11 @@ regulator_register(const struct regulator_desc *regulator_desc,
mutex_unlock(&regulator_list_mutex);
}
+ if (!rdev->desc->ops->get_voltage &&
+ !rdev->desc->ops->list_voltage &&
+ !rdev->desc->fixed_uV)
+ rdev->is_switch = true;
+
ret = device_register(&rdev->dev);
if (ret != 0) {
put_device(&rdev->dev);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 6ebd42aad291..25cf3069e2e7 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -227,6 +227,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
missing = year;
}
+ /* Can't proceed if alarm is still invalid after replacing
+ * missing fields.
+ */
+ err = rtc_valid_tm(&alarm->time);
+ if (err)
+ goto done;
+
/* with luck, no rollover is needed */
t_now = rtc_tm_to_time64(&now);
t_alm = rtc_tm_to_time64(&alarm->time);
@@ -278,9 +285,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
dev_warn(&rtc->dev, "alarm rollover not handled\n");
}
-done:
err = rtc_valid_tm(&alarm->time);
+done:
if (err) {
dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n",
alarm->time.tm_year + 1900, alarm->time.tm_mon + 1,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7030d7cd3861..c554e529fc4e 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -41,6 +41,9 @@
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#ifdef CONFIG_X86
+#include <asm/i8259.h>
+#endif
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <linux/mc146818rtc.h>
@@ -1117,17 +1120,23 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
{
cmos_wake_setup(&pnp->dev);
- if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0))
+ if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) {
+ unsigned int irq = 0;
+#ifdef CONFIG_X86
/* Some machines contain a PNP entry for the RTC, but
* don't define the IRQ. It should always be safe to
- * hardcode it in these cases
+ * hardcode it on systems with a legacy PIC.
*/
+ if (nr_legacy_irqs())
+ irq = 8;
+#endif
return cmos_do_probe(&pnp->dev,
- pnp_get_resource(pnp, IORESOURCE_IO, 0), 8);
- else
+ pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
+ } else {
return cmos_do_probe(&pnp->dev,
pnp_get_resource(pnp, IORESOURCE_IO, 0),
pnp_irq(pnp, 0));
+ }
}
static void cmos_pnp_remove(struct pnp_dev *pnp)
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 3b3049c8c9e0..c0eb113588ff 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -527,6 +527,10 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
if (get_user(new_margin, (int __user *)arg))
return -EFAULT;
+ /* the hardware's tick rate is 4096 Hz, so
+ * the counter value needs to be scaled accordingly
+ */
+ new_margin <<= 12;
if (new_margin < 1 || new_margin > 16777216)
return -EINVAL;
@@ -535,7 +539,8 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
ds1374_wdt_ping();
/* fallthrough */
case WDIOC_GETTIMEOUT:
- return put_user(wdt_margin, (int __user *)arg);
+ /* scale the tick count back down to seconds when returning it */
+ return put_user((wdt_margin >> 12), (int __user *)arg);
case WDIOC_SETOPTIONS:
if (copy_from_user(&options, (int __user *)arg, sizeof(int)))
return -EFAULT;
@@ -543,14 +548,15 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
if (options & WDIOS_DISABLECARD) {
pr_info("disable watchdog\n");
ds1374_wdt_disable();
+ return 0;
}
if (options & WDIOS_ENABLECARD) {
pr_info("enable watchdog\n");
ds1374_wdt_settimeout(wdt_margin);
ds1374_wdt_ping();
+ return 0;
}
-
return -EINVAL;
}
return -ENOTTY;
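[Editor's note] The ds1374 scaling above is plain fixed-point arithmetic: the watchdog counter runs at 4096 Hz, so a timeout in seconds becomes seconds << 12 ticks, and is shifted back down when reported. A small hedged sanity check (user-space, illustrative only):

#include <stdio.h>

int main(void)
{
        int seconds = 60;
        int ticks = seconds << 12;              /* 60 * 4096 = 245760 */
        int max_ticks = 16777216;               /* counter limit from the bounds check */

        printf("%d s -> %d ticks, reported back as %d s\n",
               seconds, ticks, ticks >> 12);
        printf("counter limit corresponds to %d s (~%d min)\n",
               max_ticks >> 12, (max_ticks >> 12) / 60);
        return 0;
}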
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 58698d21c2c3..c4ca6a385790 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -168,6 +168,7 @@ static int m41t80_get_datetime(struct i2c_client *client,
/* Sets the given date and time to the real time clock. */
static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
+ struct m41t80_data *clientdata = i2c_get_clientdata(client);
unsigned char buf[8];
int err, flags;
@@ -183,6 +184,17 @@ static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year - 100);
buf[M41T80_REG_WDAY] = tm->tm_wday;
+ /* If the square wave output is controlled in the weekday register */
+ if (clientdata->features & M41T80_FEATURE_SQ_ALT) {
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, M41T80_REG_WDAY);
+ if (val < 0)
+ return val;
+
+ buf[M41T80_REG_WDAY] |= (val & 0xf0);
+ }
+
err = i2c_smbus_write_i2c_block_data(client, M41T80_REG_SSEC,
sizeof(buf), buf);
if (err < 0) {
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index e4324dcf9508..180ec1f8c917 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -57,7 +57,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
{
- long rc = OPAL_BUSY;
+ s64 rc = OPAL_BUSY;
int retries = 10;
u32 y_m_d;
u64 h_m_s_ms;
@@ -66,13 +66,17 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
- else if (retries-- && (rc == OPAL_HARDWARE
- || rc == OPAL_INTERNAL_ERROR))
- msleep(10);
- else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
- break;
+ } else if (rc == OPAL_BUSY) {
+ msleep(OPAL_BUSY_DELAY_MS);
+ } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+ if (retries--) {
+ msleep(10); /* Wait 10ms before retry */
+ rc = OPAL_BUSY; /* go around again */
+ }
+ }
}
if (rc != OPAL_SUCCESS)
@@ -87,21 +91,26 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
{
- long rc = OPAL_BUSY;
+ s64 rc = OPAL_BUSY;
int retries = 10;
u32 y_m_d = 0;
u64 h_m_s_ms = 0;
tm_to_opal(tm, &y_m_d, &h_m_s_ms);
+
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_write(y_m_d, h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
- else if (retries-- && (rc == OPAL_HARDWARE
- || rc == OPAL_INTERNAL_ERROR))
- msleep(10);
- else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
- break;
+ } else if (rc == OPAL_BUSY) {
+ msleep(OPAL_BUSY_DELAY_MS);
+ } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+ if (retries--) {
+ msleep(10); /* Wait 10ms before retry */
+ rc = OPAL_BUSY; /* go around again */
+ }
+ }
}
return rc == OPAL_SUCCESS ? 0 : -EIO;
@@ -150,6 +159,16 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
y_m_d = be32_to_cpu(__y_m_d);
h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
+
+ /* check if no alarm is set */
+ if (y_m_d == 0 && h_m_s_ms == 0) {
+ pr_debug("No alarm is set\n");
+ rc = -ENOENT;
+ goto exit;
+ } else {
+ pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms);
+ }
+
opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
exit:
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 0f11c2a228e3..a753ef9c1459 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -257,7 +257,7 @@ static int snvs_rtc_probe(struct platform_device *pdev)
of_property_read_u32(pdev->dev.of_node, "offset", &data->offset);
}
- if (!data->regmap) {
+ if (IS_ERR(data->regmap)) {
dev_err(&pdev->dev, "Can't find snvs syscon\n");
return -ENODEV;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5ecd40884f01..0da246505f70 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1950,8 +1950,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device,
{
int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
- if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
- /* dasd is being set offline. */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+ !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+ * dasd is being set offline,
+ * but it is not a safe offline, where we would have to allow I/O
+ */
return 1;
}
if (device->stopped) {
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 8305ab688d57..a20dc2940d2d 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2755,6 +2755,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
erp = dasd_3990_erp_handle_match_erp(cqr, erp);
}
+
+ /*
+ * For path verification work we need to stick with the path that was
+ * originally chosen so that the per path configuration data is
+ * assigned correctly.
+ */
+ if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
+ erp->lpm = cqr->lpm;
+ }
+
if (device->features & DASD_FEATURE_ERPLOG) {
/* print current erp_chain */
dev_err(&device->cdev->dev,
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 1e560188dd13..e453d2a7d7f9 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -591,13 +591,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
int dasd_alias_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
- struct alias_lcu *lcu;
+ __u8 uaddr = private->uid.real_unit_addr;
+ struct alias_lcu *lcu = private->lcu;
unsigned long flags;
int rc;
- lcu = private->lcu;
rc = 0;
spin_lock_irqsave(&lcu->lock, flags);
+ /*
+ * Check if device and lcu type differ. If so, the uac data may be
+ * outdated and needs to be updated.
+ */
+ if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
+ lcu->flags |= UPDATE_PENDING;
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "uid type mismatch - trigger rescan");
+ }
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device, device);
if (rc)
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 41e28b23b26a..8ac27efe34fc 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,6 +2,8 @@
# S/390 character devices
#
+CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 11674698b36d..67903c93328b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
+ struct channel_path *chp;
struct chp_link link;
struct chp_id chpid;
int status;
@@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
status = chp_get_status(chpid);
- if (status < 0)
- chp_new(chpid);
- else if (!status)
+ if (!status)
return;
+
+ if (status < 0) {
+ chp_new(chpid);
+ } else {
+ chp = chpid_to_chp(chpid);
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 71bf9bded485..66e9bb053629 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -126,7 +126,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int start, int count, int auto_ack)
{
- int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
+ int rc, tmp_count = count, tmp_start = start, nr = q->nr;
unsigned int ccq = 0;
qperf_inc(q, eqbs);
@@ -149,14 +149,7 @@ again:
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
tmp_count);
- /*
- * Retry once, if that fails bail out and process the
- * extracted buffers before trying again.
- */
- if (!retried++)
- goto again;
- else
- return count - tmp_count;
+ return count - tmp_count;
}
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
@@ -212,7 +205,10 @@ again:
return 0;
}
-/* returns number of examined buffers and their common state in *state */
+/*
+ * Returns number of examined buffers and their common state in *state.
+ * Requested number of buffers-to-examine must be > 0.
+ */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
int auto_ack, int merge_pending)
@@ -223,17 +219,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
- for (i = 0; i < count; i++) {
- if (!__state) {
- __state = q->slsb.val[bufnr];
- if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
- __state = SLSB_P_OUTPUT_EMPTY;
- } else if (merge_pending) {
- if ((q->slsb.val[bufnr] & __state) != __state)
- break;
- } else if (q->slsb.val[bufnr] != __state)
- break;
+ /* get initial state: */
+ __state = q->slsb.val[bufnr];
+ if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+ __state = SLSB_P_OUTPUT_EMPTY;
+
+ for (i = 1; i < count; i++) {
bufnr = next_buf(bufnr);
+
+ /* merge PENDING into EMPTY: */
+ if (merge_pending &&
+ q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
+ __state == SLSB_P_OUTPUT_EMPTY)
+ continue;
+
+ /* stop if next state differs from initial state: */
+ if (q->slsb.val[bufnr] != __state)
+ break;
}
*state = __state;
return i;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 9b5fc502f6a1..403712bf1ddf 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -592,6 +592,11 @@ struct qeth_cmd_buffer {
void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
};
+static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
+{
+ return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
+}
+
/**
* definition of a qeth channel, used for read and write
*/
@@ -849,7 +854,7 @@ struct qeth_trap_id {
*/
static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
{
- return PFN_UP(end - 1) - PFN_DOWN(start);
+ return PFN_UP(end) - PFN_DOWN(start);
}
static inline int qeth_get_micros(void)
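[Editor's note] The PFN_UP(end) change above fixes an undercount when the last data byte lands exactly on a page boundary; the companion hunks later in this series guard the call with start != end because the new formula would report one element for an empty range. A hedged numeric example with a 4 KB page size and made-up addresses:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
        /* 17 bytes whose last byte is the first byte of a new page */
        unsigned long start = 0x10ff0, end = 0x11001;   /* end is exclusive */

        printf("old: %lu element(s)\n", PFN_UP(end - 1) - PFN_DOWN(start)); /* 1: undercounts */
        printf("new: %lu element(s)\n", PFN_UP(end) - PFN_DOWN(start));     /* 2: correct */
        return 0;
}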
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index df8f74cb1406..283416aefa56 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -522,8 +522,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
queue == card->qdio.no_in_queues - 1;
}
-
-static int qeth_issue_next_read(struct qeth_card *card)
+static int __qeth_issue_next_read(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -554,6 +553,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
return rc;
}
+static int qeth_issue_next_read(struct qeth_card *card)
+{
+ int ret;
+
+ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+ ret = __qeth_issue_next_read(card);
+ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+
+ return ret;
+}
+
static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
@@ -957,7 +967,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_running_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- wake_up(&card->wait_q);
+ wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
@@ -1161,6 +1171,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
rc = qeth_get_problem(cdev, irb);
if (rc) {
+ card->read_or_write_problem = 1;
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
goto out;
@@ -1179,7 +1190,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
return;
if (channel == &card->read &&
channel->state == CH_STATE_UP)
- qeth_issue_next_read(card);
+ __qeth_issue_next_read(card);
iob = channel->iob;
index = channel->buf_no;
@@ -2050,7 +2061,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long flags;
struct qeth_reply *reply = NULL;
unsigned long timeout, event_timeout;
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = NULL;
QETH_CARD_TEXT(card, 2, "sendctl");
@@ -2064,23 +2075,27 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
reply->callback = reply_cb;
reply->param = reply_param;
- if (card->state == CARD_STATE_DOWN)
- reply->seqno = QETH_IDX_COMMAND_SEQNO;
- else
- reply->seqno = card->seqno.ipa++;
+
init_waitqueue_head(&reply->wait_q);
- spin_lock_irqsave(&card->lock, flags);
- list_add_tail(&reply->list, &card->cmd_waiter_list);
- spin_unlock_irqrestore(&card->lock, flags);
QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
- qeth_prepare_control_data(card, len, iob);
- if (IS_IPA(iob->data))
+ if (IS_IPA(iob->data)) {
+ cmd = __ipa_cmd(iob);
+ cmd->hdr.seqno = card->seqno.ipa++;
+ reply->seqno = cmd->hdr.seqno;
event_timeout = QETH_IPA_TIMEOUT;
- else
+ } else {
+ reply->seqno = QETH_IDX_COMMAND_SEQNO;
event_timeout = QETH_TIMEOUT;
+ }
+ qeth_prepare_control_data(card, len, iob);
+
+ spin_lock_irqsave(&card->lock, flags);
+ list_add_tail(&reply->list, &card->cmd_waiter_list);
+ spin_unlock_irqrestore(&card->lock, flags);
+
timeout = jiffies + event_timeout;
QETH_CARD_TEXT(card, 6, "noirqpnd");
@@ -2105,9 +2120,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
/* we have only one long running ipassist, since we can ensure
process context of this command we can sleep */
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- if ((cmd->hdr.command == IPA_CMD_SETIP) &&
- (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
+ if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
+ cmd->hdr.prot_version == QETH_PROT_IPV4) {
if (!wait_event_timeout(reply->wait_q,
atomic_read(&reply->received), event_timeout))
goto time_err;
@@ -2871,7 +2885,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
cmd->hdr.command = command;
cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
- cmd->hdr.seqno = card->seqno.ipa;
+ /* cmd->hdr.seqno is set by qeth_send_control_data() */
cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
if (card->options.layer2)
@@ -3852,10 +3866,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
int qeth_get_elements_no(struct qeth_card *card,
struct sk_buff *skb, int extra_elems, int data_offset)
{
- int elements = qeth_get_elements_for_range(
- (addr_t)skb->data + data_offset,
- (addr_t)skb->data + skb_headlen(skb)) +
- qeth_get_elements_for_frags(skb);
+ addr_t end = (addr_t)skb->data + skb_headlen(skb);
+ int elements = qeth_get_elements_for_frags(skb);
+ addr_t start = (addr_t)skb->data + data_offset;
+
+ if (start != end)
+ elements += qeth_get_elements_for_range(start, end);
if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
@@ -4984,8 +5000,6 @@ static void qeth_core_free_card(struct qeth_card *card)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
- if (card->dev)
- free_netdev(card->dev);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
kfree(card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 5082dfeacb95..e94e9579914e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1057,8 +1057,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);
if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
return;
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index eedf9b01a496..573569474e44 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -39,8 +39,40 @@ struct qeth_ipaddr {
unsigned int pfxlen;
} a6;
} u;
-
};
+
+static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
+ struct qeth_ipaddr *a2)
+{
+ if (a1->proto != a2->proto)
+ return false;
+ if (a1->proto == QETH_PROT_IPV6)
+ return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
+ return a1->u.a4.addr == a2->u.a4.addr;
+}
+
+static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
+ struct qeth_ipaddr *a2)
+{
+ /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
+ * so 'proto' and 'addr' match for sure.
+ *
+ * For ucast:
+ * - 'mac' is always 0.
+ * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
+ * values are required to avoid mixups in takeover eligibility.
+ *
+ * For mcast,
+ * - 'mac' is mapped from the IP, and thus always matches.
+ * - 'mask'/'pfxlen' is always 0.
+ */
+ if (a1->type != a2->type)
+ return false;
+ if (a1->proto == QETH_PROT_IPV6)
+ return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
+ return a1->u.a4.mask == a2->u.a4.mask;
+}
+
static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
{
u64 ret = 0;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 1487f8a0c575..4ca161bdc696 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -154,6 +154,24 @@ int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
return -EINVAL;
}
+static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
+ struct qeth_ipaddr *query)
+{
+ u64 key = qeth_l3_ipaddr_hash(query);
+ struct qeth_ipaddr *addr;
+
+ if (query->is_multicast) {
+ hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
+ if (qeth_l3_addr_match_ip(addr, query))
+ return addr;
+ } else {
+ hash_for_each_possible(card->ip_htable, addr, hnode, key)
+ if (qeth_l3_addr_match_ip(addr, query))
+ return addr;
+ }
+ return NULL;
+}
+
static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{
int i, j;
@@ -207,34 +225,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
return rc;
}
-inline int
-qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
-{
- return addr1->proto == addr2->proto &&
- !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
- !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
-}
-
-static struct qeth_ipaddr *
-qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
-{
- struct qeth_ipaddr *addr;
-
- if (tmp_addr->is_multicast) {
- hash_for_each_possible(card->ip_mc_htable, addr,
- hnode, qeth_l3_ipaddr_hash(tmp_addr))
- if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
- return addr;
- } else {
- hash_for_each_possible(card->ip_htable, addr,
- hnode, qeth_l3_ipaddr_hash(tmp_addr))
- if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
- return addr;
- }
-
- return NULL;
-}
-
int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
int rc = 0;
@@ -249,8 +239,8 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
}
- addr = qeth_l3_ip_from_hash(card, tmp_addr);
- if (!addr)
+ addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+ if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
return -ENOENT;
addr->ref_counter--;
@@ -259,12 +249,8 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
if (addr->in_progress)
return -EINPROGRESS;
- if (!qeth_card_hw_is_reachable(card)) {
- addr->disp_flag = QETH_DISP_ADDR_DELETE;
- return 0;
- }
-
- rc = qeth_l3_deregister_addr_entry(card, addr);
+ if (qeth_card_hw_is_reachable(card))
+ rc = qeth_l3_deregister_addr_entry(card, addr);
hash_del(&addr->hnode);
kfree(addr);
@@ -276,6 +262,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
int rc = 0;
struct qeth_ipaddr *addr;
+ char buf[40];
QETH_CARD_TEXT(card, 4, "addip");
@@ -286,8 +273,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
}
- addr = qeth_l3_ip_from_hash(card, tmp_addr);
- if (!addr) {
+ addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+ if (addr) {
+ if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
+ return -EADDRINUSE;
+ if (qeth_l3_addr_match_all(addr, tmp_addr)) {
+ addr->ref_counter++;
+ return 0;
+ }
+ qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
+ buf);
+ dev_warn(&card->gdev->dev,
+ "Registering IP address %s failed\n", buf);
+ return -EADDRINUSE;
+ } else {
addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
if (!addr)
return -ENOMEM;
@@ -327,18 +326,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
(rc == IPA_RC_LAN_OFFLINE)) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
if (addr->ref_counter < 1) {
- qeth_l3_delete_ip(card, addr);
+ qeth_l3_deregister_addr_entry(card, addr);
+ hash_del(&addr->hnode);
kfree(addr);
}
} else {
hash_del(&addr->hnode);
kfree(addr);
}
- } else {
- if (addr->type == QETH_IP_TYPE_NORMAL)
- addr->ref_counter++;
}
-
return rc;
}
@@ -406,11 +402,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
- if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
- qeth_l3_deregister_addr_entry(card, addr);
- hash_del(&addr->hnode);
- kfree(addr);
- } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+ if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
spin_unlock_bh(&card->ip_lock);
@@ -726,12 +718,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
return -ENOMEM;
spin_lock_bh(&card->ip_lock);
-
- if (qeth_l3_ip_from_hash(card, ipaddr))
- rc = -EEXIST;
- else
- qeth_l3_add_ip(card, ipaddr);
-
+ rc = qeth_l3_add_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
kfree(ipaddr);
@@ -794,12 +781,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
return -ENOMEM;
spin_lock_bh(&card->ip_lock);
-
- if (qeth_l3_ip_from_hash(card, ipaddr))
- rc = -EEXIST;
- else
- qeth_l3_add_ip(card, ipaddr);
-
+ rc = qeth_l3_add_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
kfree(ipaddr);
@@ -1444,8 +1426,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
memcpy(tmp->mac, buf, sizeof(tmp->mac));
tmp->is_multicast = 1;
- ipm = qeth_l3_ip_from_hash(card, tmp);
+ ipm = qeth_l3_find_addr_by_ip(card, tmp);
if (ipm) {
+ /* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
} else {
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1528,8 +1511,9 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
sizeof(struct in6_addr));
tmp->is_multicast = 1;
- ipm = qeth_l3_ip_from_hash(card, tmp);
+ ipm = qeth_l3_find_addr_by_ip(card, tmp);
if (ipm) {
+ /* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
continue;
}
@@ -2784,11 +2768,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
struct sk_buff *skb, int extra_elems)
{
- addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
- int elements = qeth_get_elements_for_range(
- tcpdptr,
- (addr_t)skb->data + skb_headlen(skb)) +
- qeth_get_elements_for_frags(skb);
+ addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+ addr_t end = (addr_t)skb->data + skb_headlen(skb);
+ int elements = qeth_get_elements_for_frags(skb);
+
+ if (start != end)
+ elements += qeth_get_elements_for_range(start, end);
if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2,
@@ -3207,8 +3192,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);
if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index be65da2988fb..838edbba8aec 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -246,6 +246,12 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
{
int rc = 0;
+ if (!tag || tag > MAX_MCC_CMD) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : invalid tag %u\n", tag);
+ return -EINVAL;
+ }
+
if (beiscsi_hba_in_error(phba)) {
clear_bit(MCC_TAG_STATE_RUNNING,
&phba->ctrl.ptag_state[tag].tag_state);
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index fdd4eb4e41b2..e58786f797d9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -191,6 +191,7 @@ struct bnx2fc_hba {
struct bnx2fc_cmd_mgr *cmd_mgr;
spinlock_t hba_lock;
struct mutex hba_mutex;
+ struct mutex hba_stats_mutex;
unsigned long adapter_state;
#define ADAPTER_STATE_UP 0
#define ADAPTER_STATE_GOING_DOWN 1
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f9ddb6156f14..bee7d37367ca 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -670,15 +670,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
if (!fw_stats)
return NULL;
+ mutex_lock(&hba->hba_stats_mutex);
+
bnx2fc_stats = fc_get_host_stats(shost);
init_completion(&hba->stat_req_done);
if (bnx2fc_send_stat_req(hba))
- return bnx2fc_stats;
+ goto unlock_stats_mutex;
rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
if (!rc) {
BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
- return bnx2fc_stats;
+ goto unlock_stats_mutex;
}
BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -700,6 +702,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
memcpy(&hba->prev_stats, hba->stats_buffer,
sizeof(struct fcoe_statistics_params));
+
+unlock_stats_mutex:
+ mutex_unlock(&hba->hba_stats_mutex);
return bnx2fc_stats;
}
@@ -1348,6 +1353,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
}
spin_lock_init(&hba->hba_lock);
mutex_init(&hba->hba_mutex);
+ mutex_init(&hba->hba_stats_mutex);
hba->cnic = cnic;
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 622bdabc8894..dab195f04da7 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
goto bye;
}
- mempool_free(mbp, hw->mb_mempool);
if (finicsum != cfcsum) {
csio_warn(hw,
"Config File checksum mismatch: csum=%#x, computed=%#x\n",
@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
rv = csio_hw_validate_caps(hw, mbp);
if (rv != 0)
goto bye;
+
+ mempool_free(mbp, hw->mb_mempool);
+ mbp = NULL;
+
/*
* Note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 44dd372aa7d3..c056b8111ad2 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1127,12 +1127,6 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
else
CMD_ABTS_STATUS(sc) = hdr_status;
- atomic64_dec(&fnic_stats->io_stats.active_ios);
- if (atomic64_read(&fnic->io_cmpl_skip))
- atomic64_dec(&fnic->io_cmpl_skip);
- else
- atomic64_inc(&fnic_stats->io_stats.io_completions);
-
if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
@@ -1173,6 +1167,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
(((u64)CMD_FLAGS(sc) << 32) |
CMD_STATE(sc)));
sc->scsi_done(sc);
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
}
}
@@ -1962,6 +1961,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* Call SCSI completion function to complete the IO */
sc->result = (DID_ABORT << 16);
sc->scsi_done(sc);
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
}
fnic_abort_cmd_end:
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 9a0696f68f37..b81a53c4a9a8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -367,7 +367,7 @@ enum ibmvfc_fcp_rsp_info_codes {
};
struct ibmvfc_fcp_rsp_info {
- __be16 reserved;
+ u8 reserved[3];
u8 rsp_code;
u8 reserved2[4];
}__attribute__((packed, aligned (2)));
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 532474109624..c5bc41d97f84 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -836,8 +836,10 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ata_qc_complete(qc);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -5947,8 +5949,10 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -6338,8 +6342,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -6365,8 +6371,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(scsi_cmd);
spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
} else {
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 4abd3fce5ab6..c2b682916337 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1695,6 +1695,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
+ /*
+ * cmds should fail during shutdown, if the session
+ * state is bad, allowing completion to happen
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ reason = FAILURE_SESSION_FAILED;
+ sc->result = DID_NO_CONNECT << 16;
+ break;
+ }
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1980,6 +1989,19 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
+ * During shutdown, if session is prematurely disconnected,
+ * recovery won't happen and there will be hung cmds. Not
+ * handling cmds would trigger EH, also bad in this case.
+ * Instead, handle cmd, allow completion to happen and let
+ * the upper layer deal with the result.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ sc->result = DID_NO_CONNECT << 16;
+ ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
+ /*
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
@@ -2083,7 +2105,7 @@ done:
task->last_timeout = jiffies;
spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ "timer reset" : "shutdown or nh");
return rc;
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 022bb6e10d98..12886f96b286 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -282,6 +282,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
+ phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
skip:
if (new_phy)
@@ -675,7 +676,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);
- if (!res)
+ if (res)
goto out;
phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -684,6 +685,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
out:
+ kfree(req);
kfree(resp);
return res;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f7e3f27bb5c5..e9ea8f4ea2c9 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11312,6 +11312,7 @@ int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
+ uint32_t wqesize;
/* Create FOF EQ */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
@@ -11332,8 +11333,11 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.oas_cq = qdesc;
/* Create OAS WQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+ qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
phba->sli4_hba.wq_ecount);
+
if (!qdesc)
goto out_error;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8f1df76a77b6..0902ed204ba8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -13696,6 +13696,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
case LPFC_Q_CREATE_VERSION_1:
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ LPFC_Q_CREATE_VERSION_1);
+
switch (wq->entry_size) {
default:
case 64:
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 14c0334f41e4..26c67c42985c 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -55,6 +55,7 @@ struct mac_esp_priv {
int error;
};
static struct esp *esp_chips[2];
+static DEFINE_SPINLOCK(esp_chips_lock);
#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
platform_get_drvdata((struct platform_device *) \
@@ -562,15 +563,18 @@ static int esp_mac_probe(struct platform_device *dev)
}
host->irq = IRQ_MAC_SCSI;
- esp_chips[dev->id] = esp;
- mb();
- if (esp_chips[!dev->id] == NULL) {
- err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
- if (err < 0) {
- esp_chips[dev->id] = NULL;
- goto fail_free_priv;
- }
+
+ /* The request_irq() call is intended to succeed for the first device
+ * and fail for the second device.
+ */
+ err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
+ spin_lock(&esp_chips_lock);
+ if (err < 0 && esp_chips[!dev->id] == NULL) {
+ spin_unlock(&esp_chips_lock);
+ goto fail_free_priv;
}
+ esp_chips[dev->id] = esp;
+ spin_unlock(&esp_chips_lock);
err = scsi_esp_register(esp, &dev->dev);
if (err)
@@ -579,8 +583,13 @@ static int esp_mac_probe(struct platform_device *dev)
return 0;
fail_free_irq:
- if (esp_chips[!dev->id] == NULL)
+ spin_lock(&esp_chips_lock);
+ esp_chips[dev->id] = NULL;
+ if (esp_chips[!dev->id] == NULL) {
+ spin_unlock(&esp_chips_lock);
free_irq(host->irq, esp);
+ } else
+ spin_unlock(&esp_chips_lock);
fail_free_priv:
kfree(mep);
fail_free_command_block:
@@ -599,9 +608,13 @@ static int esp_mac_remove(struct platform_device *dev)
scsi_esp_unregister(esp);
+ spin_lock(&esp_chips_lock);
esp_chips[dev->id] = NULL;
- if (!(esp_chips[0] || esp_chips[1]))
+ if (esp_chips[!dev->id] == NULL) {
+ spin_unlock(&esp_chips_lock);
free_irq(irq, NULL);
+ } else
+ spin_unlock(&esp_chips_lock);
kfree(mep);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 468acab04d3d..b8589068d175 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4065,19 +4065,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
- /*
- * Bug work around for firmware SATL handling. The loop
- * is based on atomic operations and ensures consistency
- * since we're lockless at this point
- */
- do {
- if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
- scmd->result = SAM_STAT_BUSY;
- scmd->scsi_done(scmd);
- return 0;
- }
- } while (_scsih_set_satl_pending(scmd, true));
-
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4103,6 +4090,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
+ /*
+ * Bug work around for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency
+ * since we're lockless at this point
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
else if (scmd->sc_data_direction == DMA_TO_DEVICE)
@@ -4124,6 +4124,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (!smid) {
pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -4154,6 +4155,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (mpi_request->DataLength) {
if (ioc->build_sg_scmd(ioc, scmd, smid)) {
mpt3sas_base_free_smid(ioc, smid);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
} else
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8f12f6baa6b8..4441a559f139 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -369,6 +369,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res)
srb_t *sp = (srb_t *)ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
+ del_timer(&sp->u.iocb_cmd.timer);
complete(&abt->u.abt.comp);
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 94630d4738e6..baccd116f864 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1443,7 +1443,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
- int que, cnt;
+ int que, cnt, status;
unsigned long flags;
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@@ -1473,8 +1473,12 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
*/
sp_get(sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- qla2xxx_eh_abort(GET_CMD_SP(sp));
+ status = qla2xxx_eh_abort(GET_CMD_SP(sp));
spin_lock_irqsave(&ha->hardware_lock, flags);
+ /* Get rid of extra reference if immediate exit
+ * from ql2xxx_eh_abort */
+ if (status == FAILED && (qla2x00_isp_reg_stat(ha)))
+ atomic_dec(&sp->ref_count);
}
req->outstanding_cmds[cnt] = NULL;
sp->done(vha, sp, res);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 59059ffbb98c..11f45cb99892 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -5789,7 +5789,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
fc_port_t *fcport;
int rc;
- fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
"qla_target(%d): Allocation of tmp FC port failed",
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 26e6b05d05fc..43d4b30cbf65 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -180,7 +180,7 @@ static struct {
{"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
- {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
+ {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
{"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"HP", "C1557A", NULL, BLIST_FORCELUN},
@@ -596,17 +596,12 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev,
int key)
{
struct scsi_dev_info_list *devinfo;
- int err;
devinfo = scsi_dev_info_list_find(vendor, model, key);
if (!IS_ERR(devinfo))
return devinfo->flags;
- err = PTR_ERR(devinfo);
- if (err != -ENOENT)
- return err;
-
- /* nothing found, return nothing */
+ /* key or device not found: return nothing */
if (key != SCSI_DEVINFO_GLOBAL)
return 0;
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 84addee05be6..a5e30e9449ef 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"IBM", "1815", "rdac", },
{"IBM", "1818", "rdac", },
{"IBM", "3526", "rdac", },
+ {"IBM", "3542", "rdac", },
+ {"IBM", "3552", "rdac", },
{"SGI", "TP9", "rdac", },
{"SGI", "IS", "rdac", },
- {"STK", "OPENstorage D280", "rdac", },
+ {"STK", "OPENstorage", "rdac", },
{"STK", "FLEXLINE 380", "rdac", },
+ {"STK", "BladeCtlr", "rdac", },
{"SUN", "CSM", "rdac", },
{"SUN", "LCSM100", "rdac", },
{"SUN", "STK6580_6780", "rdac", },
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ace56c5e61e1..14ba1a2c0b7c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1935,6 +1935,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
break; /* standby */
if (sshdr.asc == 4 && sshdr.ascq == 0xc)
break; /* unavailable */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
+ break; /* sanitize in progress */
/*
* Issue command to spin up drive when not ready
*/
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 50adabbb5808..69046d342bc5 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -548,7 +548,6 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
ecomp = &edev->component[components++];
if (!IS_ERR(ecomp)) {
- ses_get_power_status(edev, ecomp);
if (addl_desc_ptr)
ses_process_descriptor(
ecomp,
@@ -579,13 +578,16 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
}
static void ses_match_to_enclosure(struct enclosure_device *edev,
- struct scsi_device *sdev)
+ struct scsi_device *sdev,
+ int refresh)
{
+ struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent);
struct efd efd = {
.addr = 0,
};
- ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+ if (refresh)
+ ses_enclosure_data_process(edev, edev_sdev, 0);
if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
efd.addr = sas_get_address(sdev);
@@ -616,7 +618,7 @@ static int ses_intf_add(struct device *cdev,
struct enclosure_device *prev = NULL;
while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
- ses_match_to_enclosure(edev, sdev);
+ ses_match_to_enclosure(edev, sdev, 1);
prev = edev;
}
return -ENODEV;
@@ -728,7 +730,7 @@ static int ses_intf_add(struct device *cdev,
shost_for_each_device(tmp_sdev, sdev->host) {
if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
continue;
- ses_match_to_enclosure(edev, tmp_sdev);
+ ses_match_to_enclosure(edev, tmp_sdev, 0);
}
return 0;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cd9537ddc19f..f61b37109e5c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -524,6 +524,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
} else
count = (old_hdr->result == 0) ? 0 : -EIO;
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
retval = count;
free_old_hdr:
kfree(old_hdr);
@@ -564,6 +565,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
}
err_out:
err2 = sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
return err ? : err2 ? : count;
}
@@ -663,18 +665,14 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
* is a non-zero input_size, so emit a warning.
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
- static char cmd[TASK_COMM_LEN];
- if (strcmp(current->comm, cmd)) {
- printk_ratelimited(KERN_WARNING
- "sg_write: data in/out %d/%d bytes "
- "for SCSI command 0x%x-- guessing "
- "data in;\n program %s not setting "
- "count and/or reply_len properly\n",
- old_hdr.reply_len - (int)SZ_SG_HEADER,
- input_size, (unsigned int) cmnd[0],
- current->comm);
- strcpy(cmd, current->comm);
- }
+ printk_ratelimited(KERN_WARNING
+ "sg_write: data in/out %d/%d bytes "
+ "for SCSI command 0x%x-- guessing "
+ "data in;\n program %s not setting "
+ "count and/or reply_len properly\n",
+ old_hdr.reply_len - (int)SZ_SG_HEADER,
+ input_size, (unsigned int) cmnd[0],
+ current->comm);
}
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
@@ -773,11 +771,15 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
+ if (hp->dxfer_len >= SZ_256M)
+ return -EINVAL;
+
k = sg_start_req(srp, cmnd);
if (k) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
return k; /* probably out of space --> ENOMEM */
}
if (atomic_read(&sdp->detaching)) {
@@ -790,6 +792,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
return -ENODEV;
}
@@ -1280,6 +1283,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
struct sg_fd *sfp = srp->parentfp;
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
kref_put(&sfp->f_ref, sg_remove_sfp);
}
@@ -1824,8 +1828,6 @@ sg_finish_rem_req(Sg_request *srp)
else
sg_remove_scat(sfp, req_schp);
- sg_remove_request(sfp, srp);
-
return ret;
}
@@ -2062,11 +2064,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
resp->done = 2; /* guard against other readers */
- break;
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ return NULL;
}
/* always adds to end of list */
@@ -2172,12 +2175,17 @@ sg_remove_sfp_usercontext(struct work_struct *work)
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
Sg_request *srp;
+ unsigned long iflags;
/* Cleanup any responses which were never read(). */
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
while (!list_empty(&sfp->rq_list)) {
srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
sg_finish_rem_req(srp);
+ list_del(&srp->entry);
+ srp->parentfp = NULL;
}
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 2bf96d33428a..0dd1984e381e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -915,10 +915,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
case TEST_UNIT_READY:
break;
default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_ERROR);
}
break;
case SRB_STATUS_INVALID_LUN:
+ set_host_byte(scmnd, DID_NO_CONNECT);
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c680d7641311..cbc8e9388268 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -28,6 +28,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
#define VIRTIO_SCSI_MEMPOOL_SZ 64
@@ -705,6 +706,28 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
+static int virtscsi_device_alloc(struct scsi_device *sdevice)
+{
+ /*
+ * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
+ * may have transfer limits which come from the host SCSI
+ * controller or something on the host side other than the
+ * target itself.
+ *
+ * To make this work properly, the hypervisor can adjust the
+ * target's VPD information to advertise these limits. But
+ * for that to work, the guest has to look at the VPD pages,
+ * which we won't do by default if it is an SPC-2 device, even
+ * if it does actually support it.
+ *
+ * So, set the blist to always try to read the VPD pages.
+ */
+ sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
+
+ return 0;
+}
+
+
/**
* virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
* @sdev: Virtscsi target whose queue depth to change
@@ -776,6 +799,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
+ .slave_alloc = virtscsi_device_alloc,
.can_queue = 1024,
.dma_boundary = UINT_MAX,
@@ -795,6 +819,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
+ .slave_alloc = virtscsi_device_alloc,
.can_queue = 1024,
.dma_boundary = UINT_MAX,
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 119054bc922b..2caacd9d2526 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2429,39 +2429,21 @@ struct cgr_comp {
struct completion completion;
};
-static int qman_delete_cgr_thread(void *p)
+static void qman_delete_cgr_smp_call(void *p)
{
- struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
- int ret;
-
- ret = qman_delete_cgr(cgr_comp->cgr);
- complete(&cgr_comp->completion);
-
- return ret;
+ qman_delete_cgr((struct qman_cgr *)p);
}
void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
- struct task_struct *thread;
- struct cgr_comp cgr_comp;
-
preempt_disable();
if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
- init_completion(&cgr_comp.completion);
- cgr_comp.cgr = cgr;
- thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
- "cgr_del");
-
- if (IS_ERR(thread))
- goto out;
-
- kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
- wake_up_process(thread);
- wait_for_completion(&cgr_comp.completion);
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_delete_cgr_smp_call, cgr, true);
preempt_enable();
return;
}
-out:
+
qman_delete_cgr(cgr);
preempt_enable();
}
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 2707a827261b..5482302d3d9e 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -163,11 +163,15 @@ EXPORT_SYMBOL(qe_issue_cmd);
*/
static unsigned int brg_clk = 0;
+#define CLK_GRAN (1000)
+#define CLK_GRAN_LIMIT (5)
+
unsigned int qe_get_brg_clk(void)
{
struct device_node *qe;
int size;
const u32 *prop;
+ unsigned int mod;
if (brg_clk)
return brg_clk;
@@ -185,6 +189,15 @@ unsigned int qe_get_brg_clk(void)
of_node_put(qe);
+ /* round this if near to a multiple of CLK_GRAN */
+ mod = brg_clk % CLK_GRAN;
+ if (mod) {
+ if (mod < CLK_GRAN_LIMIT)
+ brg_clk -= mod;
+ else if (mod > (CLK_GRAN - CLK_GRAN_LIMIT))
+ brg_clk += CLK_GRAN - mod;
+ }
+
return brg_clk;
}
EXPORT_SYMBOL(qe_get_brg_clk);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0e7415f6d093..b7995474148c 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -156,7 +156,6 @@ config SPI_BCM63XX_HSSPI
config SPI_BCM_QSPI
tristate "Broadcom BSPI and MSPI controller support"
depends on ARCH_BRCMSTB || ARCH_BCM || ARCH_BCM_IPROC || COMPILE_TEST
- depends on MTD_NORFLASH
default ARCH_BCM_IPROC
help
Enables support for the Broadcom SPI flash and MSPI controller.
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 8feac599e9ab..44be6b593b30 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1669,12 +1669,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
/* reset the hardware and block queue progress */
- spin_lock_irq(&as->lock);
if (as->use_dma) {
atmel_spi_stop_dma(as);
atmel_spi_release_dma(as);
}
+ spin_lock_irq(&as->lock);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
spi_readl(as, SR);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 02fb96797ac8..0d8f43a17edb 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = t->rx_buf;
t->rx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_FROM_DEVICE);
- if (dma_mapping_error(&spi->dev, !t->rx_dma)) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
ret = -EFAULT;
goto err_rx_map;
}
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 447497e9124c..d25cc4037e23 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -115,8 +115,8 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
- clk_disable_unprepare(dwsmmio->clk);
dw_spi_remove_host(&dwsmmio->dws);
+ clk_disable_unprepare(dwsmmio->clk);
return 0;
}
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index d5157b2222ce..a47cf638460a 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -454,6 +454,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
int elements = 0;
int word_len, element_count;
struct omap2_mcspi_cs *cs = spi->controller_state;
+ void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
count = xfer->len;
@@ -549,8 +551,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
if (l & OMAP2_MCSPI_CHCONF_TURBO) {
elements--;
- if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
- & OMAP2_MCSPI_CHSTAT_RXS)) {
+ if (!mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS)) {
u32 w;
w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
@@ -568,8 +570,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
return count;
}
}
- if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
- & OMAP2_MCSPI_CHSTAT_RXS)) {
+ if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
u32 w;
w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 9918a57a6a6e..7e7da97982aa 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -464,7 +464,7 @@ err_free_master:
static int sun6i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6db80635ace8..c2e85e23d538 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -743,8 +743,14 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
for (i = 0; i < sgs; i++) {
if (vmalloced_buf || kmap_buf) {
- min = min_t(size_t,
- len, desc_len - offset_in_page(buf));
+ /*
+ * Next scatterlist entry size is the minimum between
+ * the desc_len and the remaining buffer length that
+ * fits in a page.
+ */
+ min = min_t(size_t, desc_len,
+ min_t(size_t, len,
+ PAGE_SIZE - offset_in_page(buf)));
if (vmalloced_buf)
vm_page = vmalloc_to_page(buf);
else
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index d08324998933..6d690e5fa9bb 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -343,24 +343,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
mutex_lock(&ashmem_mutex);
if (asma->size == 0) {
- ret = -EINVAL;
- goto out;
+ mutex_unlock(&ashmem_mutex);
+ return -EINVAL;
}
if (!asma->file) {
- ret = -EBADF;
- goto out;
+ mutex_unlock(&ashmem_mutex);
+ return -EBADF;
}
+ mutex_unlock(&ashmem_mutex);
+
ret = vfs_llseek(asma->file, offset, origin);
if (ret < 0)
- goto out;
+ return ret;
/** Copy f_pos from backing file, since f_ops->llseek() sets it */
file->f_pos = asma->file->f_pos;
-
-out:
- mutex_unlock(&ashmem_mutex);
return ret;
}
@@ -719,16 +718,14 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
size_t pgstart, pgend;
int ret = -EINVAL;
+ if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+ return -EFAULT;
+
mutex_lock(&ashmem_mutex);
if (unlikely(!asma->file))
goto out_unlock;
- if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) {
- ret = -EFAULT;
- goto out_unlock;
- }
-
/* per custom, you can pass zero for len to mean "everything onward" */
if (!pin.len)
pin.len = PAGE_ALIGN(asma->size) - pin.offset;
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index a5bf2cc165c0..1736248bc5b8 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -484,8 +484,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
struct comedi_cmd *cmd = &async->cmd;
if (cmd->stop_src == TRIG_COUNT) {
- unsigned int nscans = nsamples / cmd->scan_end_arg;
- unsigned int scans_left = __comedi_nscans_left(s, nscans);
+ unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg);
unsigned int scan_pos =
comedi_bytes_to_samples(s, async->scan_progress);
unsigned long long samples_left = 0;
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index a574885ffba9..18c5312f7886 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1284,6 +1284,8 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
ack |= NISTC_INTA_ACK_AI_START;
if (a_status & NISTC_AI_STATUS1_STOP)
ack |= NISTC_INTA_ACK_AI_STOP;
+ if (a_status & NISTC_AI_STATUS1_OVER)
+ ack |= NISTC_INTA_ACK_AI_ERR;
if (ack)
ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index a7416cd9ac71..7b0587d8b176 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -838,7 +838,7 @@ void sptlrpc_request_out_callback(struct ptlrpc_request *req)
if (req->rq_pool || !req->rq_reqbuf)
return;
- kfree(req->rq_reqbuf);
+ kvfree(req->rq_reqbuf);
req->rq_reqbuf = NULL;
req->rq_reqbuf_len = 0;
}
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index e744aa9730ff..dea018cba094 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -834,7 +834,9 @@ static ssize_t message_show(struct kobject *kobj,
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
unsigned long flags;
- BUG_ON(!group);
+ if (WARN_ON(!group))
+ return -EINVAL;
+
spin_lock_irqsave(&speakup_info.spinlock, flags);
retval = message_show_helper(buf, group->start, group->end);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
@@ -846,7 +848,9 @@ static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
{
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
- BUG_ON(!group);
+ if (WARN_ON(!group))
+ return -EINVAL;
+
return message_store_helper(buf, count, group);
}
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 5a7a87efed27..28b5392153a8 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -842,7 +842,7 @@ static void
do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
struct scsi_device *scsidev;
- unsigned char buf[36];
+ unsigned char *buf;
struct scatterlist *sg;
unsigned int i;
char *this_page;
@@ -857,6 +857,10 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
if (cmdrsp->scsi.no_disk_result == 0)
return;
+ buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
+ if (!buf)
+ return;
+
/* Linux scsi code wants a device at Lun 0
* to issue report luns, but we don't want
* a disk there so we'll present a processor
@@ -868,6 +872,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
if (scsi_sg_count(scsicmd) == 0) {
memcpy(scsi_sglist(scsicmd), buf,
cmdrsp->scsi.bufflen);
+ kfree(buf);
return;
}
@@ -879,6 +884,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
memcpy(this_page, buf + bufind, sg[i].length);
kunmap_atomic(this_page_orig);
}
+ kfree(buf);
} else {
devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
for_each_vdisk_match(vdisk, devdata, scsidev) {
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 6ab7443eabde..6326375b76ab 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1930,6 +1930,8 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
wid.type = WID_STR;
wid.size = ETH_ALEN;
wid.val = kmalloc(wid.size, GFP_KERNEL);
+ if (!wid.val)
+ return -ENOMEM;
stamac = wid.val;
memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 242f82f4d24f..d8ab4cb11289 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -197,6 +197,8 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) {
skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr));
+ if (!skb2)
+ return -ENOMEM;
memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 170de1c9eac4..f815f9d5045f 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -169,7 +169,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
hw->ident_sta_fw.variant) >
HFA384x_FIRMWARE_VERSION(1, 5, 0)) {
if (msg->scantype.data != P80211ENUM_scantype_active)
- word = cpu_to_le16(msg->maxchanneltime.data);
+ word = msg->maxchanneltime.data;
else
word = 0;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 97928b42ad62..57a3d3936364 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -276,12 +276,11 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
else
ret = vfs_iter_read(fd, &iter, &pos);
- kfree(bvec);
-
if (is_write) {
if (ret < 0 || ret != data_length) {
pr_err("%s() write returned %d\n", __func__, ret);
- return (ret < 0 ? ret : -EINVAL);
+ if (ret >= 0)
+ ret = -EINVAL;
}
} else {
/*
@@ -294,17 +293,29 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
pr_err("%s() returned %d, expecting %u for "
"S_ISBLK\n", __func__, ret,
data_length);
- return (ret < 0 ? ret : -EINVAL);
+ if (ret >= 0)
+ ret = -EINVAL;
}
} else {
if (ret < 0) {
pr_err("%s() returned %d for non S_ISBLK\n",
__func__, ret);
- return ret;
+ } else if (ret != data_length) {
+ /*
+ * Short read case:
+ * Probably someone truncated the file under us.
+ * We must explicitly zero sg-pages to prevent
+ * exposing uninitialized pages to userspace.
+ */
+ if (ret < data_length)
+ ret += iov_iter_zero(data_length - ret, &iter);
+ else
+ ret = -EINVAL;
}
}
}
- return 1;
+ kfree(bvec);
+ return ret;
}
static sense_reason_t
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 58169e519422..18c8c0a50d37 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -589,7 +589,6 @@ static int __init optee_driver_init(void)
return -ENODEV;
np = of_find_matching_node(fw_np, optee_match);
- of_node_put(fw_np);
if (!np)
return -ENODEV;
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 06912f0602b7..b7cb49afa056 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -587,6 +587,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+ data->irq_enabled = true;
+ data->mode = THERMAL_DEVICE_ENABLED;
+
ret = devm_request_threaded_irq(&pdev->dev, data->irq,
imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
0, "imx_thermal", data);
@@ -598,9 +601,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
return ret;
}
- data->irq_enabled = true;
- data->mode = THERMAL_DEVICE_ENABLED;
-
return 0;
}
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index b4d3116cfdaf..3055f9a12a17 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
+ mutex_lock(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if ((instance->trip != params->trip_max_desired_temperature) ||
(!cdev_is_power_actor(instance->cdev)))
@@ -534,6 +535,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
mutex_unlock(&instance->cdev->lock);
thermal_cdev_update(instance->cdev);
}
+ mutex_unlock(&tz->lock);
}
/**
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index ad1186dd6132..a45810b43f70 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -185,6 +185,7 @@
* @regulator: pointer to the TMU regulator structure.
* @reg_conf: pointer to structure to register with core thermal.
* @ntrip: number of supported trip points.
+ * @enabled: current status of TMU device
* @tmu_initialize: SoC specific TMU initialization method
* @tmu_control: SoC specific TMU control method
* @tmu_read: SoC specific TMU temperature read method
@@ -205,6 +206,7 @@ struct exynos_tmu_data {
struct regulator *regulator;
struct thermal_zone_device *tzd;
unsigned int ntrip;
+ bool enabled;
int (*tmu_initialize)(struct platform_device *pdev);
void (*tmu_control)(struct platform_device *pdev, bool on);
@@ -398,6 +400,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
mutex_lock(&data->lock);
clk_enable(data->clk);
data->tmu_control(pdev, on);
+ data->enabled = on;
clk_disable(data->clk);
mutex_unlock(&data->lock);
}
@@ -889,19 +892,24 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
static int exynos_get_temp(void *p, int *temp)
{
struct exynos_tmu_data *data = p;
+ int value, ret = 0;
- if (!data || !data->tmu_read)
+ if (!data || !data->tmu_read || !data->enabled)
return -EINVAL;
mutex_lock(&data->lock);
clk_enable(data->clk);
- *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
+ value = data->tmu_read(data);
+ if (value < 0)
+ ret = value;
+ else
+ *temp = code_to_temp(data, value) * MCELSIUS;
clk_disable(data->clk);
mutex_unlock(&data->lock);
- return 0;
+ return ret;
}
#ifdef CONFIG_THERMAL_EMULATION
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index a8c20413dbda..cba6bc6ab9ed 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -628,6 +628,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
* we just disable hotplug, the
* pci-tunnels stay alive.
*/
+ .thaw_noirq = nhi_resume_noirq,
.restore_noirq = nhi_resume_noirq,
};
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 54cab59e20ed..9e9016e67843 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -137,6 +137,9 @@ struct gsm_dlci {
struct mutex mutex;
/* Link layer */
+ int mode;
+#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
+#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
spinlock_t lock; /* Protects the internal state */
struct timer_list t1; /* Retransmit timer for SABM and UA */
int retries;
@@ -1380,7 +1383,13 @@ retry:
ctrl->data = data;
ctrl->len = clen;
gsm->pending_cmd = ctrl;
- gsm->cretries = gsm->n2;
+
+ /* If DLCI0 is in ADM mode skip retries, it won't respond */
+ if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
+ gsm->cretries = 1;
+ else
+ gsm->cretries = gsm->n2;
+
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
gsm_control_transmit(gsm, ctrl);
spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1467,6 +1476,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
* in which case an opening port goes back to closed and a closing port
* is simply put into closed state (any further frames from the other
* end will get a DM response)
+ *
+ * Some control dlci can stay in ADM mode with other dlci working just
+ * fine. In that case we can just keep the control dlci open after the
+ * DLCI_OPENING retries time out.
*/
static void gsm_dlci_t1(unsigned long data)
@@ -1480,8 +1493,16 @@ static void gsm_dlci_t1(unsigned long data)
if (dlci->retries) {
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
- } else
+ } else if (!dlci->addr && gsm->control == (DM | PF)) {
+ if (debug & 8)
+ pr_info("DLCI %d opening in ADM mode.\n",
+ dlci->addr);
+ dlci->mode = DLCI_MODE_ADM;
+ gsm_dlci_open(dlci);
+ } else {
gsm_dlci_close(dlci);
+ }
+
break;
case DLCI_CLOSING:
dlci->retries--;
@@ -1499,8 +1520,8 @@ static void gsm_dlci_t1(unsigned long data)
* @dlci: DLCI to open
*
* Commence opening a DLCI from the Linux side. We issue SABM messages
- * to the modem which should then reply with a UA, at which point we
- * will move into open state. Opening is done asynchronously with retry
+ * to the modem which should then reply with a UA or ADM, at which point
+ * we will move into open state. Opening is done asynchronously with retry
* running off timers and the responses.
*/
@@ -2854,11 +2875,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
static int gsm_carrier_raised(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ struct gsm_mux *gsm = dlci->gsm;
+
/* Not yet open so no carrier info */
if (dlci->state != DLCI_OPEN)
return 0;
if (debug & 2)
return 1;
+
+ /*
+ * Basic mode with control channel in ADM mode may not respond
+ * to CMD_MSC at all and modem_rx is empty.
+ */
+ if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
+ !dlci->modem_rx)
+ return 1;
+
return dlci->modem_rx & TIOCM_CD;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index faf50df81622..1c70541a1467 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2182,6 +2182,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
}
if (tty_hung_up_p(file))
break;
+ /*
+ * Abort readers for ttys which never actually
+ * get hung up. See __tty_hangup().
+ */
+ if (test_bit(TTY_HUPPING, &tty->flags))
+ break;
if (!timeout)
break;
if (file->f_flags & O_NONBLOCK) {
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 459d726f9d59..3eb01a719d22 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -464,7 +464,8 @@ static int dw8250_probe(struct platform_device *pdev)
/* If no clock rate is defined, fail. */
if (!p->uartclk) {
dev_err(dev, "clock rate not defined\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_clk;
}
data->pclk = devm_clk_get(dev, "apb_pclk");
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index da31159a03ec..e8b34f16ba2c 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -613,6 +613,10 @@ static int omap_8250_startup(struct uart_port *port)
up->lsr_saved_flags = 0;
up->msr_saved_flags = 0;
+ /* Disable DMA for console UART */
+ if (uart_console(port))
+ up->dma = NULL;
+
if (up->dma) {
ret = serial8250_request_dma(up);
if (ret) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index b80ea872b039..e82b3473b6b8 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5100,6 +5100,17 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
pbn_b2_4_115200 },
/*
+ * BrainBoxes UC-260
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b2_4_115200 },
+ /*
* Perle PCI-RAS cards
*/
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 53af53c43e8c..e3756a9a9b53 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1302,14 +1302,15 @@ static void pl011_stop_tx(struct uart_port *port)
pl011_dma_tx_stop(uap);
}
-static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
+static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
- uap->im |= UART011_TXIM;
- pl011_write(uap->im, uap, REG_IMSC);
- pl011_tx_chars(uap, false);
+ if (pl011_tx_chars(uap, false)) {
+ uap->im |= UART011_TXIM;
+ pl011_write(uap->im, uap, REG_IMSC);
+ }
}
static void pl011_start_tx(struct uart_port *port)
@@ -1389,25 +1390,26 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
return true;
}
-static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
+/* Returns true if tx interrupts have to be (kept) enabled */
+static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
struct circ_buf *xmit = &uap->port.state->xmit;
int count = uap->fifosize >> 1;
if (uap->port.x_char) {
if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
- return;
+ return true;
uap->port.x_char = 0;
--count;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
pl011_stop_tx(&uap->port);
- return;
+ return false;
}
/* If we are using DMA mode, try to send some characters. */
if (pl011_dma_tx_irq(uap))
- return;
+ return true;
do {
if (likely(from_irq) && count-- == 0)
@@ -1422,8 +1424,11 @@ static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
- if (uart_circ_empty(xmit))
+ if (uart_circ_empty(xmit)) {
pl011_stop_tx(&uap->port);
+ return false;
+ }
+ return true;
}
static void pl011_modem_status(struct uart_amba_port *uap)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4d079cdaa7a3..addb287cacea 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1780,6 +1780,7 @@ static void atmel_get_ip_name(struct uart_port *port)
switch (version) {
case 0x302:
case 0x10213:
+ case 0x10302:
dev_dbg(port->dev, "This version is usart\n");
atmel_port->has_frac_baudrate = true;
atmel_port->has_hw_timer = true;
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index c3651540e1ba..b9a4625b8690 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -172,7 +172,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
*/
int __init setup_earlycon(char *buf)
{
- const struct earlycon_id *match;
+ const struct earlycon_id **p_match;
if (!buf || !buf[0])
return -EINVAL;
@@ -180,7 +180,9 @@ int __init setup_earlycon(char *buf)
if (early_con.flags & CON_ENABLED)
return -EALREADY;
- for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+ for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+ p_match++) {
+ const struct earlycon_id *match = *p_match;
size_t len = strlen(match->name);
if (strncmp(buf, match->name, len))
@@ -253,11 +255,12 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
}
port->mapbase = addr;
port->uartclk = BASE_BAUD * 16;
- port->membase = earlycon_map(port->mapbase, SZ_4K);
val = of_get_flat_dt_prop(node, "reg-offset", NULL);
if (val)
port->mapbase += be32_to_cpu(*val);
+ port->membase = earlycon_map(port->mapbase, SZ_4K);
+
val = of_get_flat_dt_prop(node, "reg-shift", NULL);
if (val)
port->regshift = be32_to_cpu(*val);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 521a6e450755..ecadc27eea48 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1316,19 +1316,10 @@ static int imx_startup(struct uart_port *port)
if (!is_imx1_uart(sport)) {
temp = readl(sport->port.membase + UCR3);
- /*
- * The effect of RI and DCD differs depending on the UFCR_DCEDTE
- * bit. In DCE mode they control the outputs, in DTE mode they
- * enable the respective irqs. At least the DCD irq cannot be
- * cleared on i.MX25 at least, so it's not usable and must be
- * disabled. I don't have test hardware to check if RI has the
- * same problem but I consider this likely so it's disabled for
- * now, too.
- */
- temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP |
- UCR3_DTRDEN | UCR3_RI | UCR3_DCD;
+ temp |= UCR3_DTRDEN | UCR3_RI | UCR3_DCD;
if (sport->dte_mode)
+ /* disable broken interrupts */
temp &= ~(UCR3_RI | UCR3_DCD);
writel(temp, sport->port.membase + UCR3);
@@ -1583,8 +1574,6 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
ufcr = readl(sport->port.membase + UFCR);
ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
- if (sport->dte_mode)
- ufcr |= UFCR_DCEDTE;
writel(ufcr, sport->port.membase + UFCR);
writel(num, sport->port.membase + UBIR);
@@ -2149,6 +2138,37 @@ static int serial_imx_probe(struct platform_device *pdev)
UCR1_TXMPTYEN | UCR1_RTSDEN);
writel_relaxed(reg, sport->port.membase + UCR1);
+ if (!is_imx1_uart(sport) && sport->dte_mode) {
+ /*
+ * The DCEDTE bit changes the direction of DSR, DCD, DTR and RI
+ * and influences if UCR3_RI and UCR3_DCD changes the level of RI
+ * and DCD (when they are outputs) or enables the respective
+ * irqs. So set this bit early, i.e. before requesting irqs.
+ */
+ reg = readl(sport->port.membase + UFCR);
+ if (!(reg & UFCR_DCEDTE))
+ writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR);
+
+ /*
+ * Disable UCR3_RI and UCR3_DCD irqs. They are also not
+ * enabled later because they cannot be cleared
+ * (confirmed on i.MX25) which makes them unusable.
+ */
+ writel(IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP | UCR3_DSR,
+ sport->port.membase + UCR3);
+
+ } else {
+ unsigned long ucr3 = UCR3_DSR;
+
+ reg = readl(sport->port.membase + UFCR);
+ if (reg & UFCR_DCEDTE)
+ writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR);
+
+ if (!is_imx1_uart(sport))
+ ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
+ writel(ucr3, sport->port.membase + UCR3);
+ }
+
clk_disable_unprepare(sport->clk_ipg);
/*
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index fcf803ffad19..b9c7a904c1ea 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -884,14 +884,28 @@ static int sccnxp_probe(struct platform_device *pdev)
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- if (PTR_ERR(clk) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ ret = PTR_ERR(clk);
+ if (ret == -EPROBE_DEFER)
goto err_out;
- }
+ uartclk = 0;
+ } else {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_out;
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret)
+ goto err_out;
+
+ uartclk = clk_get_rate(clk);
+ }
+
+ if (!uartclk) {
dev_notice(&pdev->dev, "Using default clock frequency\n");
uartclk = s->chip->freq_std;
- } else
- uartclk = clk_get_rate(clk);
+ }
/* Check input frequency */
if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) {
@@ -983,7 +997,7 @@ static int sccnxp_probe(struct platform_device *pdev)
uart_unregister_driver(&s->uart);
err_out:
if (!IS_ERR(s->regulator))
- return regulator_disable(s->regulator);
+ regulator_disable(s->regulator);
return ret;
}
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 23973a8124fc..6cbd46c565f8 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1135,6 +1135,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
uport->ops->config_port(uport, flags);
ret = uart_startup(tty, state, 1);
+ if (ret == 0)
+ tty_port_set_initialized(port, true);
if (ret > 0)
ret = 0;
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 15eaea53b3df..107f0d194ac5 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -935,6 +935,8 @@ static void sci_receive_chars(struct uart_port *port)
/* Tell the rest of the system the news. New characters! */
tty_flip_buffer_push(tport);
} else {
+ /* TTY buffers full; read from RX reg to prevent lockup */
+ serial_port_in(port, SCxRDR);
serial_port_in(port, SCxSR); /* dummy read */
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
}
@@ -1543,7 +1545,16 @@ static void sci_free_dma(struct uart_port *port)
if (s->chan_rx)
sci_rx_dma_release(s, false);
}
-#else
+
+static void sci_flush_buffer(struct uart_port *port)
+{
+ /*
+ * In uart_flush_buffer(), the xmit circular buffer has just been
+ * cleared, so we have to reset tx_dma_len accordingly.
+ */
+ to_sci_port(port)->tx_dma_len = 0;
+}
+#else /* !CONFIG_SERIAL_SH_SCI_DMA */
static inline void sci_request_dma(struct uart_port *port)
{
}
@@ -1551,7 +1562,9 @@ static inline void sci_request_dma(struct uart_port *port)
static inline void sci_free_dma(struct uart_port *port)
{
}
-#endif
+
+#define sci_flush_buffer NULL
+#endif /* !CONFIG_SERIAL_SH_SCI_DMA */
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
@@ -2549,6 +2562,7 @@ static const struct uart_ops sci_uart_ops = {
.break_ctl = sci_break_ctl,
.startup = sci_startup,
.shutdown = sci_shutdown,
+ .flush_buffer = sci_flush_buffer,
.set_termios = sci_set_termios,
.pm = sci_pm,
.type = sci_type,
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8d9f9a803b42..789c81482542 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -709,6 +709,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
return;
}
+ /*
+ * Some console devices aren't actually hung up for technical and
+ * historical reasons, which can lead to indefinite interruptible
+ * sleep in n_tty_read(). The following explicitly tells
+ * n_tty_read() to abort readers.
+ */
+ set_bit(TTY_HUPPING, &tty->flags);
+
/* inuse_filps is protected by the single tty lock,
this really needs to change if we want to flush the
workqueue with the lock held */
@@ -763,6 +771,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
* from the ldisc side, which is now guaranteed.
*/
set_bit(TTY_HUPPED, &tty->flags);
+ clear_bit(TTY_HUPPING, &tty->flags);
tty_unlock(tty);
if (f)
@@ -1702,6 +1711,8 @@ static void release_tty(struct tty_struct *tty, int idx)
if (tty->link)
tty->link->port->itty = NULL;
tty_buffer_cancel_work(tty->port);
+ if (tty->link)
+ tty_buffer_cancel_work(tty->link->port);
tty_kref_put(tty->link);
tty_kref_put(tty);
@@ -3159,7 +3170,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
kref_init(&tty->kref);
tty->magic = TTY_MAGIC;
- tty_ldisc_init(tty);
+ if (tty_ldisc_init(tty)) {
+ kfree(tty);
+ return NULL;
+ }
tty->session = NULL;
tty->pgrp = NULL;
mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 3a9e2a2fd4c6..4ab518d43758 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -175,12 +175,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
return ERR_CAST(ldops);
}
- ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
- if (ld == NULL) {
- put_ldops(ldops);
- return ERR_PTR(-ENOMEM);
- }
-
+ /*
+ * There is no way to handle allocation failure of only 16 bytes.
+ * Let's simplify error handling and save more memory.
+ */
+ ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
ld->ops = ldops;
ld->tty = tty;
@@ -753,12 +752,13 @@ void tty_ldisc_release(struct tty_struct *tty)
* the tty structure is not completely set up when this call is made.
*/
-void tty_ldisc_init(struct tty_struct *tty)
+int tty_ldisc_init(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
if (IS_ERR(ld))
- panic("n_tty: init_tty");
+ return PTR_ERR(ld);
tty->ldisc = ld;
+ return 0;
}
/**
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index ce2c3c6349d4..9e1ac58e269e 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1354,6 +1354,11 @@ static void csi_m(struct vc_data *vc)
case 3:
vc->vc_italic = 1;
break;
+ case 21:
+ /*
+ * No console drivers support double underline, so
+ * convert it to a single underline.
+ */
case 4:
vc->vc_underline = 1;
break;
@@ -1389,7 +1394,6 @@ static void csi_m(struct vc_data *vc)
vc->vc_disp_ctrl = 1;
vc->vc_toggle_meta = 1;
break;
- case 21:
case 22:
vc->vc_intensity = 1;
break;
@@ -1727,7 +1731,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
default_attr(vc);
update_attr(vc);
- vc->vc_tab_stop[0] = 0x01010100;
+ vc->vc_tab_stop[0] =
vc->vc_tab_stop[1] =
vc->vc_tab_stop[2] =
vc->vc_tab_stop[3] =
@@ -1771,7 +1775,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_pos -= (vc->vc_x << 1);
while (vc->vc_x < vc->vc_cols - 1) {
vc->vc_x++;
- if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31)))
+ if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31)))
break;
}
vc->vc_pos += (vc->vc_x << 1);
@@ -1831,7 +1835,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
lf(vc);
return;
case 'H':
- vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31));
+ vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31));
return;
case 'Z':
respond_ID(tty);
@@ -2024,7 +2028,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
return;
case 'g':
if (!vc->vc_par[0])
- vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31));
+ vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31));
else if (vc->vc_par[0] == 3) {
vc->vc_tab_stop[0] =
vc->vc_tab_stop[1] =
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index fba021f5736a..208bc52fc84d 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -279,7 +279,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
ret = -ENOMEM;
- goto err_map_kobj;
+ goto err_map;
}
kobject_init(&map->kobj, &map_attr_type);
map->mem = mem;
@@ -289,7 +289,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
goto err_map_kobj;
ret = kobject_uevent(&map->kobj, KOBJ_ADD);
if (ret)
- goto err_map;
+ goto err_map_kobj;
}
for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
@@ -308,7 +308,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
portio = kzalloc(sizeof(*portio), GFP_KERNEL);
if (!portio) {
ret = -ENOMEM;
- goto err_portio_kobj;
+ goto err_portio;
}
kobject_init(&portio->kobj, &portio_attr_type);
portio->port = port;
@@ -319,7 +319,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
goto err_portio_kobj;
ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
if (ret)
- goto err_portio;
+ goto err_portio_kobj;
}
return 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 6e0d614a8075..64c6af2c8559 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -839,7 +839,7 @@ static inline void ci_role_destroy(struct ci_hdrc *ci)
{
ci_hdrc_gadget_destroy(ci);
ci_hdrc_host_destroy(ci);
- if (ci->is_otg)
+ if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
ci_hdrc_otg_destroy(ci);
}
@@ -939,27 +939,35 @@ static int ci_hdrc_probe(struct platform_device *pdev)
/* initialize role(s) before the interrupt is requested */
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
ret = ci_hdrc_host_init(ci);
- if (ret)
- dev_info(dev, "doesn't support host\n");
+ if (ret) {
+ if (ret == -ENXIO)
+ dev_info(dev, "doesn't support host\n");
+ else
+ goto deinit_phy;
+ }
}
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
ret = ci_hdrc_gadget_init(ci);
- if (ret)
- dev_info(dev, "doesn't support gadget\n");
+ if (ret) {
+ if (ret == -ENXIO)
+ dev_info(dev, "doesn't support gadget\n");
+ else
+ goto deinit_host;
+ }
}
if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
dev_err(dev, "no supported roles\n");
ret = -ENODEV;
- goto deinit_phy;
+ goto deinit_gadget;
}
if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) {
ret = ci_hdrc_otg_init(ci);
if (ret) {
dev_err(dev, "init otg fails, ret = %d\n", ret);
- goto stop;
+ goto deinit_gadget;
}
}
@@ -1024,7 +1032,12 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci_extcon_unregister(ci);
stop:
- ci_role_destroy(ci);
+ if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
+ ci_hdrc_otg_destroy(ci);
+deinit_gadget:
+ ci_hdrc_gadget_destroy(ci);
+deinit_host:
+ ci_hdrc_host_destroy(ci);
deinit_phy:
ci_usb_phy_exit(ci);
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 7c54a19b20e0..5e6136d2ed71 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -186,7 +186,9 @@ static const unsigned short full_speed_maxpacket_maxes[4] = {
static const unsigned short high_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 64,
[USB_ENDPOINT_XFER_ISOC] = 1024,
- [USB_ENDPOINT_XFER_BULK] = 512,
+
+ /* Bulk should be 512, but some devices use 1024: we will warn below */
+ [USB_ENDPOINT_XFER_BULK] = 1024,
[USB_ENDPOINT_XFER_INT] = 1024,
};
static const unsigned short super_speed_maxpacket_maxes[4] = {
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 358ca8dd784f..a5240b4d7ab9 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -208,8 +208,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
if (!udev->parent)
rc = hcd_bus_suspend(udev, msg);
- /* Non-root devices don't need to do anything for FREEZE or PRETHAW */
- else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
+ /*
+ * Non-root USB2 devices don't need to do anything for FREEZE
+ * or PRETHAW. USB3 devices don't support global suspend and
+ * need to be selectively suspended.
+ */
+ else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
+ && (udev->speed < USB_SPEED_SUPER))
rc = 0;
else
rc = usb_port_suspend(udev, msg);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index fb2c38d875f9..c811a81ace5e 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2365,6 +2365,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered) {
+ pm_wakeup_event(&hcd->self.root_hub->dev, 0);
set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
queue_work(pm_wq, &hcd->wakeup_work);
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d0d3f9ef9f10..d8d992b73e88 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -648,12 +648,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
unsigned int portnum)
{
struct usb_hub *hub;
+ struct usb_port *port_dev;
if (!hdev)
return;
hub = usb_hub_to_struct_hub(hdev);
if (hub) {
+ port_dev = hub->ports[portnum - 1];
+ if (port_dev && port_dev->child)
+ pm_wakeup_event(&port_dev->child->dev, 0);
+
set_bit(portnum, hub->wakeup_bits);
kick_hub_wq(hub);
}
@@ -3417,8 +3422,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = hub_port_status(hub, port1, &portstatus, &portchange);
- if (status == 0 && !port_is_suspended(hub, portstatus))
+ if (status == 0 && !port_is_suspended(hub, portstatus)) {
+ if (portchange & USB_PORT_STAT_C_SUSPEND)
+ pm_wakeup_event(&udev->dev, 0);
goto SuspendCleared;
+ }
/* see 7.1.7.7; affects power usage, but not budgeting */
if (hub_is_superspeed(hub->hdev))
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 4c388451f31f..9cb66475c211 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -148,6 +148,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
+ /* Linger a bit, prior to the next control message. */
+ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
+ msleep(200);
+
kfree(dr);
return ret;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index c05c4f877750..40ce175655e6 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -45,6 +45,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* HP v222w 16GB Mini USB Drive */
+ { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -225,8 +228,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Corsair K70 RGB */
+ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Corsair Strafe RGB */
- { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
/* Corsair K70 LUX */
{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index df5a06578005..919a32153060 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -3237,8 +3237,12 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
if (count > 250)
dev_err(hsotg->dev,
"Connection id status change timed out\n");
- hsotg->op_state = OTG_STATE_A_HOST;
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_disconnect(hsotg);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ hsotg->op_state = OTG_STATE_A_HOST;
/* Initialize the Core for Host mode */
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index fea446900cad..a0c2b8b6edd0 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -464,6 +464,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
/*
+ * Make sure UX_EXIT_PX is cleared as that causes issues with some
+ * PHYs. Also, this bit is not supposed to be used in normal operation.
+ */
+ reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
+
+ /*
* Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
* to '0' during coreConsultant configuration. So default value
* will be '0' when the core is reset. Application needs to set it
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 884c43714456..94d6a3e2ad97 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -159,13 +159,15 @@
#define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0)
#define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff)
-#define DWC3_TXFIFOQ 1
-#define DWC3_RXFIFOQ 3
-#define DWC3_TXREQQ 5
-#define DWC3_RXREQQ 7
-#define DWC3_RXINFOQ 9
-#define DWC3_DESCFETCHQ 13
-#define DWC3_EVENTQ 15
+#define DWC3_TXFIFOQ 0
+#define DWC3_RXFIFOQ 1
+#define DWC3_TXREQQ 2
+#define DWC3_RXREQQ 3
+#define DWC3_RXINFOQ 4
+#define DWC3_PSTATQ 5
+#define DWC3_DESCFETCHQ 6
+#define DWC3_EVENTQ 7
+#define DWC3_AUXEVENTQ 8
/* Global RX Threshold Configuration Register */
#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
@@ -223,6 +225,7 @@
#define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31)
#define DWC3_GUSB3PIPECTL_U2SSINP3OK (1 << 29)
#define DWC3_GUSB3PIPECTL_DISRXDETINP3 (1 << 28)
+#define DWC3_GUSB3PIPECTL_UX_EXIT_PX (1 << 27)
#define DWC3_GUSB3PIPECTL_REQP1P2P3 (1 << 24)
#define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19)
#define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7)
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 72664700b8a2..12ee23f53cdd 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -107,6 +107,10 @@ static int kdwc3_probe(struct platform_device *pdev)
return PTR_ERR(kdwc->usbss);
kdwc->clk = devm_clk_get(kdwc->dev, "usb");
+ if (IS_ERR(kdwc->clk)) {
+ dev_err(kdwc->dev, "unable to get usb clock\n");
+ return PTR_ERR(kdwc->clk);
+ }
error = clk_prepare_enable(kdwc->clk);
if (error < 0) {
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 427291a19e6d..d6493abcf6bc 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -173,7 +173,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc3 device\n");
- return ret;
+ goto err;
}
dwc3->dev.parent = dev;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f483c3b1e971..26efe8c7535f 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2528,6 +2528,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
break;
}
+ dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
+
/* Enable USB2 LPM Capability */
if ((dwc->revision > DWC3_REVISION_194A) &&
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 22f0d5a6a07f..6143a2d839d6 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1522,7 +1522,6 @@ ffs_fs_kill_sb(struct super_block *sb)
if (sb->s_fs_info) {
ffs_release_dev(sb->s_fs_info);
ffs_data_closed(sb->s_fs_info);
- ffs_data_put(sb->s_fs_info);
}
}
@@ -2956,10 +2955,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
struct ffs_data *ffs = func->ffs;
const int full = !!func->ffs->fs_descs_count;
- const int high = gadget_is_dualspeed(func->gadget) &&
- func->ffs->hs_descs_count;
- const int super = gadget_is_superspeed(func->gadget) &&
- func->ffs->ss_descs_count;
+ const int high = !!func->ffs->hs_descs_count;
+ const int super = !!func->ffs->ss_descs_count;
int fs_len, hs_len, ss_len, ret, i;
struct ffs_ep *eps_ptr;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index b6d4b484c51a..5815120c0402 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -284,6 +284,7 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
size_t count, loff_t *offp)
{
struct f_hidg *hidg = file->private_data;
+ struct usb_request *req;
unsigned long flags;
ssize_t status = -ENOMEM;
@@ -293,7 +294,7 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
spin_lock_irqsave(&hidg->write_spinlock, flags);
#define WRITE_COND (!hidg->write_pending)
-
+try_again:
/* write queue */
while (!WRITE_COND) {
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
@@ -308,6 +309,7 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
}
hidg->write_pending = 1;
+ req = hidg->req;
count = min_t(unsigned, count, hidg->report_length);
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
@@ -320,24 +322,38 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
goto release_write_pending;
}
- hidg->req->status = 0;
- hidg->req->zero = 0;
- hidg->req->length = count;
- hidg->req->complete = f_hidg_req_complete;
- hidg->req->context = hidg;
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+
+ /* our function has been disabled by the host */
+ if (!hidg->req) {
+ free_ep_req(hidg->in_ep, hidg->req);
+ /*
+ * TODO
+ * Should we fail with error here?
+ */
+ goto try_again;
+ }
+
+ req->status = 0;
+ req->zero = 0;
+ req->length = count;
+ req->complete = f_hidg_req_complete;
+ req->context = hidg;
status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
- goto release_write_pending;
+ goto release_write_pending_unlocked;
} else {
status = count;
}
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
return status;
release_write_pending:
spin_lock_irqsave(&hidg->write_spinlock, flags);
+release_write_pending_unlocked:
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
@@ -541,12 +557,23 @@ static void hidg_disable(struct usb_function *f)
kfree(list);
}
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ if (!hidg->write_pending) {
+ free_ep_req(hidg->in_ep, hidg->req);
+ hidg->write_pending = 1;
+ }
+
+ hidg->req = NULL;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
}
static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct f_hidg *hidg = func_to_hidg(f);
+ struct usb_request *req_in = NULL;
+ unsigned long flags;
int i, status = 0;
VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt);
@@ -567,6 +594,12 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
goto fail;
}
hidg->in_ep->driver_data = hidg;
+
+ req_in = hidg_alloc_ep_req(hidg->in_ep, hidg->report_length);
+ if (!req_in) {
+ status = -ENOMEM;
+ goto disable_ep_in;
+ }
}
@@ -578,12 +611,12 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
hidg->out_ep);
if (status) {
ERROR(cdev, "config_ep_by_speed FAILED!\n");
- goto fail;
+ goto free_req_in;
}
status = usb_ep_enable(hidg->out_ep);
if (status < 0) {
ERROR(cdev, "Enable OUT endpoint FAILED!\n");
- goto fail;
+ goto free_req_in;
}
hidg->out_ep->driver_data = hidg;
@@ -599,17 +632,37 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
req->context = hidg;
status = usb_ep_queue(hidg->out_ep, req,
GFP_ATOMIC);
- if (status)
+ if (status) {
ERROR(cdev, "%s queue req --> %d\n",
hidg->out_ep->name, status);
+ free_ep_req(hidg->out_ep, req);
+ }
} else {
- usb_ep_disable(hidg->out_ep);
status = -ENOMEM;
- goto fail;
+ goto disable_out_ep;
}
}
}
+ if (hidg->in_ep != NULL) {
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ hidg->req = req_in;
+ hidg->write_pending = 0;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
+ wake_up(&hidg->write_queue);
+ }
+ return 0;
+disable_out_ep:
+ usb_ep_disable(hidg->out_ep);
+free_req_in:
+ if (req_in)
+ free_ep_req(hidg->in_ep, req_in);
+
+disable_ep_in:
+ if (hidg->in_ep)
+ usb_ep_disable(hidg->in_ep);
+
fail:
return status;
}
@@ -658,12 +711,6 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
hidg->out_ep = ep;
- /* preallocate request and buffer */
- status = -ENOMEM;
- hidg->req = alloc_ep_req(hidg->in_ep, hidg->report_length);
- if (!hidg->req)
- goto fail;
-
/* set descriptor dynamic values */
hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
@@ -690,6 +737,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
spin_lock_init(&hidg->write_spinlock);
+ hidg->write_pending = 1;
+ hidg->req = NULL;
spin_lock_init(&hidg->read_spinlock);
init_waitqueue_head(&hidg->write_queue);
init_waitqueue_head(&hidg->read_queue);
@@ -954,10 +1003,6 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
device_destroy(hidg_class, MKDEV(major, hidg->minor));
cdev_del(&hidg->cdev);
- /* disable/free request and end point */
- usb_ep_disable(hidg->in_ep);
- free_ep_req(hidg->in_ep, hidg->req);
-
usb_free_all_descriptors(f);
}
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index a5719f271bf0..70ac1963b598 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -389,7 +389,8 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (err) {
ERROR(midi, "%s: couldn't enqueue request: %d\n",
midi->out_ep->name, err);
- free_ep_req(midi->out_ep, req);
+ if (req->buf != NULL)
+ free_ep_req(midi->out_ep, req);
return err;
}
}
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index 7d53a4773d1a..2f03334c6874 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -64,7 +64,9 @@ struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len);
/* Frees a usb_request previously allocated by alloc_ep_req() */
static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
{
+ WARN_ON(req->buf == NULL);
kfree(req->buf);
+ req->buf = NULL;
usb_ep_free_request(ep, req);
}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index ccb9c213cc9f..e9bd8d4abca0 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -475,7 +475,7 @@ static int bdc_probe(struct platform_device *pdev)
bdc->dev = dev;
dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
- temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
if ((temp & BDC_P64) &&
!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
dev_dbg(bdc->dev, "Using 64-bit address\n");
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
index 02968842b359..708e36f530d8 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
@@ -82,6 +82,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (ret) {
dev_err(&pci->dev,
"couldn't add resources to bdc device\n");
+ platform_device_put(bdc);
return ret;
}
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index e97539fc127e..188961780b8a 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -139,10 +139,8 @@ int usb_ep_disable(struct usb_ep *ep)
goto out;
ret = ep->ops->disable(ep);
- if (ret) {
- ret = ret;
+ if (ret)
goto out;
- }
ep->enabled = false;
@@ -250,6 +248,9 @@ EXPORT_SYMBOL_GPL(usb_ep_free_request);
* arranges to poll once per interval, and the gadget driver usually will
* have queued some data to transfer at that time.
*
+ * Note that @req's ->complete() callback must never be called from
+ * within usb_ep_queue() as that can create deadlock situations.
+ *
* Returns zero, or a negative error code. Endpoints that are not enabled
* report errors; errors will also be
* reported when the usb peripheral is disconnected.
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index b62a3de65075..ff4d6cac7ac0 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2103,16 +2103,13 @@ static int dummy_hub_control(
}
break;
case USB_PORT_FEAT_POWER:
- if (hcd->speed == HCD_USB3) {
- if (dum_hcd->port_status & USB_PORT_STAT_POWER)
- dev_dbg(dummy_dev(dum_hcd),
- "power-off\n");
- } else
- if (dum_hcd->port_status &
- USB_SS_PORT_STAT_POWER)
- dev_dbg(dummy_dev(dum_hcd),
- "power-off\n");
- /* FALLS THROUGH */
+ dev_dbg(dummy_dev(dum_hcd), "power-off\n");
+ if (hcd->speed == HCD_USB3)
+ dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER;
+ else
+ dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
+ set_link_state(dum_hcd);
+ break;
default:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
@@ -2283,14 +2280,13 @@ static int dummy_hub_control(
if ((dum_hcd->port_status &
USB_SS_PORT_STAT_POWER) != 0) {
dum_hcd->port_status |= (1 << wValue);
- set_link_state(dum_hcd);
}
} else
if ((dum_hcd->port_status &
USB_PORT_STAT_POWER) != 0) {
dum_hcd->port_status |= (1 << wValue);
- set_link_state(dum_hcd);
}
+ set_link_state(dum_hcd);
}
break;
case GetPortErrorCount:
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index f6c7a2744e5c..a646ca3b0d00 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -73,6 +73,7 @@ static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
+#define IO_WATCHDOG_OFF 0xffffff00
#include "ohci.h"
#include "pci-quirks.h"
@@ -230,7 +231,7 @@ static int ohci_urb_enqueue (
}
/* Start up the I/O watchdog timer, if it's not running */
- if (!timer_pending(&ohci->io_watchdog) &&
+ if (ohci->prev_frame_no == IO_WATCHDOG_OFF &&
list_empty(&ohci->eds_in_use) &&
!(ohci->flags & OHCI_QUIRK_QEMU)) {
ohci->prev_frame_no = ohci_frame_no(ohci);
@@ -501,6 +502,7 @@ static int ohci_init (struct ohci_hcd *ohci)
setup_timer(&ohci->io_watchdog, io_watchdog_func,
(unsigned long) ohci);
+ ohci->prev_frame_no = IO_WATCHDOG_OFF;
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
@@ -730,7 +732,7 @@ static void io_watchdog_func(unsigned long _ohci)
u32 head;
struct ed *ed;
struct td *td, *td_start, *td_next;
- unsigned frame_no;
+ unsigned frame_no, prev_frame_no = IO_WATCHDOG_OFF;
unsigned long flags;
spin_lock_irqsave(&ohci->lock, flags);
@@ -835,7 +837,7 @@ static void io_watchdog_func(unsigned long _ohci)
}
}
if (!list_empty(&ohci->eds_in_use)) {
- ohci->prev_frame_no = frame_no;
+ prev_frame_no = frame_no;
ohci->prev_wdh_cnt = ohci->wdh_cnt;
ohci->prev_donehead = ohci_readl(ohci,
&ohci->regs->donehead);
@@ -845,6 +847,7 @@ static void io_watchdog_func(unsigned long _ohci)
}
done:
+ ohci->prev_frame_no = prev_frame_no;
spin_unlock_irqrestore(&ohci->lock, flags);
}
@@ -973,6 +976,7 @@ static void ohci_stop (struct usb_hcd *hcd)
if (quirk_nec(ohci))
flush_work(&ohci->nec_work);
del_timer_sync(&ohci->io_watchdog);
+ ohci->prev_frame_no = IO_WATCHDOG_OFF;
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
ohci_usb_reset(ohci);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index ed678c17c4ea..798b2d25dda9 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -310,8 +310,10 @@ static int ohci_bus_suspend (struct usb_hcd *hcd)
rc = ohci_rh_suspend (ohci, 0);
spin_unlock_irq (&ohci->lock);
- if (rc == 0)
+ if (rc == 0) {
del_timer_sync(&ohci->io_watchdog);
+ ohci->prev_frame_no = IO_WATCHDOG_OFF;
+ }
return rc;
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 641fed609911..24edb7674710 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -1018,6 +1018,8 @@ skip_ed:
* have modified this list. normally it's just prepending
* entries (which we'd ignore), but paranoia won't hurt.
*/
+ *last = ed->ed_next;
+ ed->ed_next = NULL;
modified = 0;
/* unlink urbs as requested, but rescan the list after
@@ -1076,21 +1078,22 @@ rescan_this:
goto rescan_this;
/*
- * If no TDs are queued, take ED off the ed_rm_list.
+ * If no TDs are queued, ED is now idle.
* Otherwise, if the HC is running, reschedule.
- * If not, leave it on the list for further dequeues.
+ * If the HC isn't running, add ED back to the
+ * start of the list for later processing.
*/
if (list_empty(&ed->td_list)) {
- *last = ed->ed_next;
- ed->ed_next = NULL;
ed->state = ED_IDLE;
list_del(&ed->in_use_list);
} else if (ohci->rh_state == OHCI_RH_RUNNING) {
- *last = ed->ed_next;
- ed->ed_next = NULL;
ed_schedule(ohci, ed);
} else {
- last = &ed->ed_next;
+ ed->ed_next = ohci->ed_rm_list;
+ ohci->ed_rm_list = ed;
+ /* Don't loop on the same ED */
+ if (last == &ohci->ed_rm_list)
+ last = &ed->ed_next;
}
if (modified)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index dec100811946..ca8b0b1ae37d 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -335,7 +335,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
- .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = DEV_PM_OPS,
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 9ca595632f17..c5e3032a4d6b 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -46,6 +46,9 @@
#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */
#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */
#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */
+#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */
+#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */
+#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */
#define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */
#define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */
#define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */
@@ -88,6 +91,9 @@ static const struct usb_device_id ld_usb_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index d3d124753266..bd6e06ef88ac 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -433,6 +433,7 @@ static void lvs_rh_disconnect(struct usb_interface *intf)
struct lvs_rh *lvs = usb_get_intfdata(intf);
sysfs_remove_group(&intf->dev.kobj, &lvs_attr_group);
+ usb_poison_urb(lvs->urb); /* used in scheduled work */
flush_work(&lvs->rh_work);
usb_free_urb(lvs->urb);
}
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index e59334b09c41..f8ac59e0158d 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -83,6 +83,8 @@ struct mon_reader_text {
wait_queue_head_t wait;
int printf_size;
+ size_t printf_offset;
+ size_t printf_togo;
char *printf_buf;
struct mutex printf_lock;
@@ -374,75 +376,103 @@ err_alloc:
return rc;
}
-/*
- * For simplicity, we read one record in one system call and throw out
- * what does not fit. This means that the following does not work:
- * dd if=/dbg/usbmon/0t bs=10
- * Also, we do not allow seeks and do not bother advancing the offset.
- */
+static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
+ char __user * const buf, const size_t nbytes)
+{
+ const size_t togo = min(nbytes, rp->printf_togo);
+
+ if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
+ return -EFAULT;
+ rp->printf_togo -= togo;
+ rp->printf_offset += togo;
+ return togo;
+}
+
+/* ppos is not advanced since the llseek operation is not permitted. */
static ssize_t mon_text_read_t(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
struct mon_reader_text *rp = file->private_data;
struct mon_event_text *ep;
struct mon_text_ptr ptr;
+ ssize_t ret;
- ep = mon_text_read_wait(rp, file);
- if (IS_ERR(ep))
- return PTR_ERR(ep);
mutex_lock(&rp->printf_lock);
- ptr.cnt = 0;
- ptr.pbuf = rp->printf_buf;
- ptr.limit = rp->printf_size;
-
- mon_text_read_head_t(rp, &ptr, ep);
- mon_text_read_statset(rp, &ptr, ep);
- ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
- " %d", ep->length);
- mon_text_read_data(rp, &ptr, ep);
-
- if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
- ptr.cnt = -EFAULT;
+
+ if (rp->printf_togo == 0) {
+
+ ep = mon_text_read_wait(rp, file);
+ if (IS_ERR(ep)) {
+ mutex_unlock(&rp->printf_lock);
+ return PTR_ERR(ep);
+ }
+ ptr.cnt = 0;
+ ptr.pbuf = rp->printf_buf;
+ ptr.limit = rp->printf_size;
+
+ mon_text_read_head_t(rp, &ptr, ep);
+ mon_text_read_statset(rp, &ptr, ep);
+ ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+ " %d", ep->length);
+ mon_text_read_data(rp, &ptr, ep);
+
+ rp->printf_togo = ptr.cnt;
+ rp->printf_offset = 0;
+
+ kmem_cache_free(rp->e_slab, ep);
+ }
+
+ ret = mon_text_copy_to_user(rp, buf, nbytes);
mutex_unlock(&rp->printf_lock);
- kmem_cache_free(rp->e_slab, ep);
- return ptr.cnt;
+ return ret;
}
+/* ppos is not advanced since the llseek operation is not permitted. */
static ssize_t mon_text_read_u(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
struct mon_reader_text *rp = file->private_data;
struct mon_event_text *ep;
struct mon_text_ptr ptr;
+ ssize_t ret;
- ep = mon_text_read_wait(rp, file);
- if (IS_ERR(ep))
- return PTR_ERR(ep);
mutex_lock(&rp->printf_lock);
- ptr.cnt = 0;
- ptr.pbuf = rp->printf_buf;
- ptr.limit = rp->printf_size;
- mon_text_read_head_u(rp, &ptr, ep);
- if (ep->type == 'E') {
- mon_text_read_statset(rp, &ptr, ep);
- } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
- mon_text_read_isostat(rp, &ptr, ep);
- mon_text_read_isodesc(rp, &ptr, ep);
- } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
- mon_text_read_intstat(rp, &ptr, ep);
- } else {
- mon_text_read_statset(rp, &ptr, ep);
+ if (rp->printf_togo == 0) {
+
+ ep = mon_text_read_wait(rp, file);
+ if (IS_ERR(ep)) {
+ mutex_unlock(&rp->printf_lock);
+ return PTR_ERR(ep);
+ }
+ ptr.cnt = 0;
+ ptr.pbuf = rp->printf_buf;
+ ptr.limit = rp->printf_size;
+
+ mon_text_read_head_u(rp, &ptr, ep);
+ if (ep->type == 'E') {
+ mon_text_read_statset(rp, &ptr, ep);
+ } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
+ mon_text_read_isostat(rp, &ptr, ep);
+ mon_text_read_isodesc(rp, &ptr, ep);
+ } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
+ mon_text_read_intstat(rp, &ptr, ep);
+ } else {
+ mon_text_read_statset(rp, &ptr, ep);
+ }
+ ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+ " %d", ep->length);
+ mon_text_read_data(rp, &ptr, ep);
+
+ rp->printf_togo = ptr.cnt;
+ rp->printf_offset = 0;
+
+ kmem_cache_free(rp->e_slab, ep);
}
- ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
- " %d", ep->length);
- mon_text_read_data(rp, &ptr, ep);
- if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
- ptr.cnt = -EFAULT;
+ ret = mon_text_copy_to_user(rp, buf, nbytes);
mutex_unlock(&rp->printf_lock);
- kmem_cache_free(rp->e_slab, ep);
- return ptr.cnt;
+ return ret;
}
static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 2d9a8067eaca..579aa9accafc 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1774,6 +1774,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
int vbus;
u8 devctl;
+ pm_runtime_get_sync(dev);
spin_lock_irqsave(&musb->lock, flags);
val = musb->a_wait_bcon;
vbus = musb_platform_get_vbus_status(musb);
@@ -1787,6 +1788,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
vbus = 0;
}
spin_unlock_irqrestore(&musb->lock, flags);
+ pm_runtime_put_sync(dev);
return sprintf(buf, "Vbus %s, timeout %lu msec\n",
vbus ? "on" : "off", val);
@@ -2483,10 +2485,11 @@ static int musb_remove(struct platform_device *pdev)
musb_generic_disable(musb);
spin_unlock_irqrestore(&musb->lock, flags);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_platform_exit(musb);
+
pm_runtime_dont_use_autosuspend(musb->controller);
pm_runtime_put_sync(musb->controller);
pm_runtime_disable(musb->controller);
- musb_platform_exit(musb);
musb_phy_callback = NULL;
if (musb->dma_controller)
musb_dma_controller_destroy(musb->dma_controller);
@@ -2710,7 +2713,8 @@ static int musb_resume(struct device *dev)
if ((devctl & mask) != (musb->context.devctl & mask))
musb->port1_status = 0;
- musb_start(musb);
+ musb_enable_interrupts(musb);
+ musb_platform_enable(musb);
spin_lock_irqsave(&musb->lock, flags);
error = musb_run_resume_work(musb);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index a55173c9e564..f1219f6d58a9 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -442,7 +442,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
req = next_request(musb_ep);
request = &req->request;
- trace_musb_req_tx(req);
csr = musb_readw(epio, MUSB_TXCSR);
musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
@@ -481,6 +480,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
u8 is_dma = 0;
bool short_packet = false;
+ trace_musb_req_tx(req);
+
if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 844a309fe895..e85b9c2a4910 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -114,15 +114,19 @@ static int service_tx_status_request(
}
is_in = epnum & USB_DIR_IN;
- if (is_in) {
- epnum &= 0x0f;
+ epnum &= 0x0f;
+ if (epnum >= MUSB_C_NUM_EPS) {
+ handled = -EINVAL;
+ break;
+ }
+
+ if (is_in)
ep = &musb->endpoints[epnum].ep_in;
- } else {
+ else
ep = &musb->endpoints[epnum].ep_out;
- }
regs = musb->endpoints[epnum].regs;
- if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
+ if (!ep->desc) {
handled = -EINVAL;
break;
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 55c624f2a8c0..e2bc91585b4f 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -418,13 +418,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
}
}
- /*
- * The pipe must be broken if current urb->status is set, so don't
- * start next urb.
- * TODO: to minimize the risk of regression, only check urb->status
- * for RX, until we have a test case to understand the behavior of TX.
- */
- if ((!status || !is_in) && qh && qh->is_ready) {
+ if (qh != NULL && qh->is_ready) {
musb_dbg(musb, "... next ep%d %cX urb %p",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
@@ -1029,7 +1023,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
/* set tx_reinit and schedule the next qh */
ep->tx_reinit = 1;
}
- musb_start_urb(musb, is_in, next_qh);
+
+ if (next_qh)
+ musb_start_urb(musb, is_in, next_qh);
}
}
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 6c6a3a8df07a..968ade5a35f5 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -1001,6 +1001,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
goto usbhsf_pio_prepare_pop;
+ /* return at this time if the pipe is running */
+ if (usbhs_pipe_is_running(pipe))
+ return 0;
+
usbhs_pipe_config_change_bfre(pipe, 1);
ret = usbhsf_fifo_select(pipe, fifo, 0);
@@ -1191,6 +1195,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
usbhsf_fifo_clear(pipe, fifo);
pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
+ usbhs_pipe_running(pipe, 0);
usbhsf_dma_stop(pipe, fifo);
usbhsf_dma_unmap(pkt);
usbhsf_fifo_unselect(pipe, pipe->fifo);
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 584ae8cbaf1c..77c3ebe860c5 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
- Fundamental Software dongle.
- Google USB serial devices
- HP4x calculators
+ - Libtransistor USB console
- a number of Motorola phones
- Motorola Tetra devices
- Novatel Wireless GPS receivers
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 3178d8afb3e6..d98531823998 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -152,6 +152,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ { USB_DEVICE(0x155A, 0x1006) }, /* ELDAT Easywave RX09 */
{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
{ USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
@@ -210,6 +211,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0c743e4cca1e..2e2f736384ab 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -773,6 +773,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
+ { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
@@ -935,6 +936,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FHE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -1909,7 +1911,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
return ftdi_jtag_probe(serial);
if (udev->product &&
- (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+ (!strcmp(udev->product, "Arrow USB Blaster") ||
+ !strcmp(udev->product, "BeagleBone/XDS100V2") ||
!strcmp(udev->product, "SNAP Connect E10")))
return ftdi_jtag_probe(serial);
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 543d2801632b..76a10b222ff9 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -922,6 +922,9 @@
/*
* RT Systems programming cables for various ham radios
*/
+/* This device uses the VID of FTDI */
+#define RTSYSTEMS_USB_VX8_PID 0x9e50 /* USB-VX8 USB to 7 pin modular plug for Yaesu VX-8 radio */
+
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */
#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */
@@ -1441,6 +1444,12 @@
#define FTDI_CINTERION_MC55I_PID 0xA951
/*
+ * Product: FirmwareHubEmulator
+ * Manufacturer: Harman Becker Automotive Systems
+ */
+#define FTDI_FHE_PID 0xA9A0
+
+/*
* Product: Comet Caller ID decoder
* Manufacturer: Crucible Technologies
*/
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1799aa058a5b..d982c455e18e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
+/* These u-blox products use Qualcomm's vendor ID */
+#define UBLOX_PRODUCT_R410M 0x90b2
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
@@ -244,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
+#define QUECTEL_PRODUCT_EP06 0x0306
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -550,147 +553,15 @@ static void option_instat_callback(struct urb *urb);
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
-struct option_blacklist_info {
- /* bitmask of interface numbers blacklisted for send_setup */
- const unsigned long sendsetup;
- /* bitmask of interface numbers that are reserved */
- const unsigned long reserved;
-};
-
-static const struct option_blacklist_info four_g_w14_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
-};
-
-static const struct option_blacklist_info four_g_w100_blacklist = {
- .sendsetup = BIT(1) | BIT(2),
- .reserved = BIT(3),
-};
-
-static const struct option_blacklist_info alcatel_x200_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_0037_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
-};
-
-static const struct option_blacklist_info zte_k3765_z_blacklist = {
- .sendsetup = BIT(0) | BIT(1) | BIT(2),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
- .sendsetup = BIT(0) | BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info zte_mc2718_z_blacklist = {
- .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
- .sendsetup = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
- .reserved = BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_me3620_xl_blacklist = {
- .reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info zte_zm8620_x_blacklist = {
- .reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info huawei_cdc12_blacklist = {
- .reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info net_intf0_blacklist = {
- .reserved = BIT(0),
-};
-static const struct option_blacklist_info net_intf1_blacklist = {
- .reserved = BIT(1),
-};
+/* Device flags */
-static const struct option_blacklist_info net_intf2_blacklist = {
- .reserved = BIT(2),
-};
+/* Interface does not support modem-control requests */
+#define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8)
-static const struct option_blacklist_info net_intf3_blacklist = {
- .reserved = BIT(3),
-};
+/* Interface is reserved */
+#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
-static const struct option_blacklist_info net_intf4_blacklist = {
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info net_intf5_blacklist = {
- .reserved = BIT(5),
-};
-
-static const struct option_blacklist_info net_intf6_blacklist = {
- .reserved = BIT(6),
-};
-
-static const struct option_blacklist_info zte_mf626_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_1255_blacklist = {
- .reserved = BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info simcom_sim7100e_blacklist = {
- .reserved = BIT(5) | BIT(6),
-};
-
-static const struct option_blacklist_info telit_me910_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(3),
-};
-
-static const struct option_blacklist_info telit_le910_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info telit_le920_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(5),
-};
-
-static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
- .sendsetup = BIT(0),
- .reserved = BIT(1),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
- .sendsetup = BIT(2),
- .reserved = BIT(0) | BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
- .reserved = BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
- .reserved = BIT(1) | BIT(4),
-};
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -724,26 +595,26 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
@@ -1188,65 +1059,70 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
/* Yuga products use Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
- .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
+ .driver_info = RSVD(1) | RSVD(4) },
+ /* u-blox products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
+ .driver_info = RSVD(1) | RSVD(3) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
+ .driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
@@ -1254,38 +1130,38 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
- .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
- .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
- .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
+ .driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
- .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
- .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(5) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
- .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
- .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
@@ -1301,58 +1177,58 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff,
- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff),
+ .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_0037_blacklist },
+ .driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
@@ -1377,26 +1253,26 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
@@ -1412,50 +1288,50 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1572,23 +1448,23 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
@@ -1603,7 +1479,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1639,17 +1515,17 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1667,8 +1543,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff),
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
@@ -1679,20 +1555,20 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
@@ -1844,19 +1720,19 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+ .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+ .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
- .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
- .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+ .driver_info = RSVD(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
- .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
- .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1876,37 +1752,34 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
- .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+ .driver_info = RSVD(5) | RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
- .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
- },
+ .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
- .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
- },
+ .driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
- .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
- },
+ .driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) },
{USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
+ .driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1932,14 +1805,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
- .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
+ .driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
@@ -1949,20 +1822,20 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -2039,9 +1912,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
@@ -2052,9 +1925,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
@@ -2114,7 +1987,7 @@ static int option_probe(struct usb_serial *serial,
struct usb_interface_descriptor *iface_desc =
&serial->interface->cur_altsetting->desc;
struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
- const struct option_blacklist_info *blacklist;
+ unsigned long device_flags = id->driver_info;
/* Never bind to the CD-Rom emulation interface */
if (iface_desc->bInterfaceClass == 0x08)
@@ -2125,9 +1998,7 @@ static int option_probe(struct usb_serial *serial,
* the same class/subclass/protocol as the serial interfaces. Look at
* the Windows driver .INF files for reserved interface numbers.
*/
- blacklist = (void *)id->driver_info;
- if (blacklist && test_bit(iface_desc->bInterfaceNumber,
- &blacklist->reserved))
+ if (device_flags & RSVD(iface_desc->bInterfaceNumber))
return -ENODEV;
/*
* Don't bind network interface on Samsung GT-B3730, it is handled by
@@ -2138,8 +2009,8 @@ static int option_probe(struct usb_serial *serial,
iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
return -ENODEV;
- /* Store the blacklist info so we can use it during attach. */
- usb_set_serial_data(serial, (void *)blacklist);
+ /* Store the device flags so we can use them during attach. */
+ usb_set_serial_data(serial, (void *)device_flags);
return 0;
}
@@ -2147,22 +2018,21 @@ static int option_probe(struct usb_serial *serial,
static int option_attach(struct usb_serial *serial)
{
struct usb_interface_descriptor *iface_desc;
- const struct option_blacklist_info *blacklist;
struct usb_wwan_intf_private *data;
+ unsigned long device_flags;
data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
if (!data)
return -ENOMEM;
- /* Retrieve blacklist info stored at probe. */
- blacklist = usb_get_serial_data(serial);
+ /* Retrieve device flags stored at probe. */
+ device_flags = (unsigned long)usb_get_serial_data(serial);
iface_desc = &serial->interface->cur_altsetting->desc;
- if (!blacklist || !test_bit(iface_desc->bInterfaceNumber,
- &blacklist->sendsetup)) {
+ if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
data->use_send_setup = 1;
- }
+
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
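
The option.c hunks above drop the per-device "struct option_blacklist_info" pointers and instead pack the reserved and "no send_setup" interface numbers straight into driver_info as bit flags, which option_probe() and option_attach() then test with RSVD() and NCTRL(). Below is a minimal userspace sketch of the same encoding; the bit layout and helper names are assumptions for illustration only, not copied from the driver.

#include <stdio.h>

/* Illustrative layout only: low byte = reserved, next byte = no-send-setup. */
#define RSVD(ifnum)	(1UL << (ifnum))	/* never bind this interface */
#define NCTRL(ifnum)	(1UL << ((ifnum) + 8))	/* no send_setup on this one */

static int should_bind(unsigned long flags, int ifnum)
{
	return !(flags & RSVD(ifnum));
}

int main(void)
{
	/* e.g. the LE910 entry above: NCTRL(0) | RSVD(1) | RSVD(2) */
	unsigned long flags = NCTRL(0) | RSVD(1) | RSVD(2);

	for (int i = 0; i < 4; i++)
		printf("iface %d: bind=%s, send_setup=%s\n", i,
		       should_bind(flags, i) ? "yes" : "no",
		       (flags & NCTRL(i)) ? "no" : "yes");
	return 0;
}
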
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 6aa7ff2c1cf7..2674da40d9cd 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -66,6 +66,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
0x01) }
DEVICE(google, GOOGLE_IDS);
+/* Libtransistor USB console */
+#define LIBTRANSISTOR_IDS() \
+ { USB_DEVICE(0x1209, 0x8b00) }
+DEVICE(libtransistor, LIBTRANSISTOR_IDS);
+
/* ViVOpay USB Serial Driver */
#define VIVOPAY_IDS() \
{ USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
@@ -113,6 +118,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
&funsoft_device,
&flashloader_device,
&google_device,
+ &libtransistor_device,
&vivopay_device,
&moto_modem_device,
&motorola_tetra_device,
@@ -129,6 +135,7 @@ static const struct usb_device_id id_table[] = {
FUNSOFT_IDS(),
FLASHLOADER_IDS(),
GOOGLE_IDS(),
+ LIBTRANSISTOR_IDS(),
VIVOPAY_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 337a0be89fcf..dbc3801b43eb 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -338,47 +338,48 @@ static int palm_os_3_probe(struct usb_serial *serial,
goto exit;
}
- if (retval == sizeof(*connection_info)) {
- connection_info = (struct visor_connection_info *)
- transfer_buffer;
-
- num_ports = le16_to_cpu(connection_info->num_ports);
- for (i = 0; i < num_ports; ++i) {
- switch (
- connection_info->connections[i].port_function_id) {
- case VISOR_FUNCTION_GENERIC:
- string = "Generic";
- break;
- case VISOR_FUNCTION_DEBUGGER:
- string = "Debugger";
- break;
- case VISOR_FUNCTION_HOTSYNC:
- string = "HotSync";
- break;
- case VISOR_FUNCTION_CONSOLE:
- string = "Console";
- break;
- case VISOR_FUNCTION_REMOTE_FILE_SYS:
- string = "Remote File System";
- break;
- default:
- string = "unknown";
- break;
- }
- dev_info(dev, "%s: port %d, is for %s use\n",
- serial->type->description,
- connection_info->connections[i].port, string);
- }
+ if (retval != sizeof(*connection_info)) {
+ dev_err(dev, "Invalid connection information received from device\n");
+ retval = -ENODEV;
+ goto exit;
}
- /*
- * Handle devices that report invalid stuff here.
- */
+
+ connection_info = (struct visor_connection_info *)transfer_buffer;
+
+ num_ports = le16_to_cpu(connection_info->num_ports);
+
+ /* Handle devices that report invalid stuff here. */
if (num_ports == 0 || num_ports > 2) {
dev_warn(dev, "%s: No valid connect info available\n",
serial->type->description);
num_ports = 2;
}
+ for (i = 0; i < num_ports; ++i) {
+ switch (connection_info->connections[i].port_function_id) {
+ case VISOR_FUNCTION_GENERIC:
+ string = "Generic";
+ break;
+ case VISOR_FUNCTION_DEBUGGER:
+ string = "Debugger";
+ break;
+ case VISOR_FUNCTION_HOTSYNC:
+ string = "HotSync";
+ break;
+ case VISOR_FUNCTION_CONSOLE:
+ string = "Console";
+ break;
+ case VISOR_FUNCTION_REMOTE_FILE_SYS:
+ string = "Remote File System";
+ break;
+ default:
+ string = "unknown";
+ break;
+ }
+ dev_info(dev, "%s: port %d, is for %s use\n",
+ serial->type->description,
+ connection_info->connections[i].port, string);
+ }
dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
num_ports);
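
The visor.c hunk above turns the "retval == sizeof(*connection_info)" test into an early error return, so the port-reporting loop no longer sits inside a large conditional. A standalone sketch of that guard-clause shape follows; the structure and function names are invented and assume nothing about the real driver types.

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct info { int num_ports; };

static int parse_info(const void *buf, size_t len, struct info *out)
{
	/* Bail out early on short reads instead of nesting the happy path. */
	if (len != sizeof(*out)) {
		fprintf(stderr, "invalid connection information\n");
		return -ENODEV;
	}

	memcpy(out, buf, sizeof(*out));

	/* Clamp obviously bogus values, as the driver does for num_ports. */
	if (out->num_ports == 0 || out->num_ports > 2)
		out->num_ports = 2;

	return 0;
}

int main(void)
{
	struct info src = { .num_ports = 1 }, dst;

	printf("%d\n", parse_info(&src, sizeof(src), &dst)); /* 0   */
	printf("%d\n", parse_info(&src, 3, &dst));            /* -19 */
	return 0;
}
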
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 4340b4925daa..4d6eb48b2c45 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1942,6 +1942,8 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag)
bcb->CDB[0] = 0xEF;
result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
+ if (us->srb != NULL)
+ scsi_set_resid(us->srb, 0);
info->BIN_FLAG = flag;
kfree(buf);
@@ -2295,21 +2297,22 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
{
- int result = 0;
+ int result = USB_STOR_XFER_GOOD;
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
/*US_DEBUG(usb_stor_show_command(us, srb)); */
scsi_set_resid(srb, 0);
- if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) {
+ if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready)))
result = ene_init(us);
- } else {
+ if (result == USB_STOR_XFER_GOOD) {
+ result = USB_STOR_TRANSPORT_ERROR;
if (info->SD_Status.Ready)
result = sd_scsi_irp(us, srb);
if (info->MS_Status.Ready)
result = ms_scsi_irp(us, srb);
}
- return 0;
+ return result;
}
static struct scsi_host_template ene_ub6250_host_template;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 6891e9092775..a96dcc660d0f 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -1076,7 +1076,7 @@ static int uas_post_reset(struct usb_interface *intf)
return 0;
err = uas_configure_endpoints(devinfo);
- if (err && err != ENODEV)
+ if (err && err != -ENODEV)
shost_printk(KERN_ERR, shost,
"%s: alloc streams error %d after reset",
__func__, err);
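
The one-line uas.c fix above corrects "err != ENODEV" to "err != -ENODEV": kernel helpers return negative errno values, so comparing against the positive constant made the warning fire even in the "device gone" case it was meant to skip. A tiny userspace illustration (errno constants only, no kernel APIs):

#include <stdio.h>
#include <errno.h>

static void report(int err)
{
	/* Wrong: err is negative on failure, so err != ENODEV is ~always true. */
	if (err && err != ENODEV)
		printf("buggy check would warn for err=%d\n", err);

	/* Right: compare against the negative errno actually returned. */
	if (err && err != -ENODEV)
		printf("fixed check warns for err=%d\n", err);
}

int main(void)
{
	report(-ENODEV);	/* only the buggy check prints */
	report(-ENOMEM);	/* both print, as intended     */
	return 0;
}
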
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index b605115eb47a..ca3a5d430ae1 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2137,6 +2137,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA ),
+/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */
+UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA ),
+
/* Reported-by George Cherian <george.cherian@cavium.com> */
UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
"JMicron",
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 325b4c05acdd..f761e02e75c9 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -201,7 +201,12 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
if (!bid)
return -ENODEV;
+ /* device_attach() callers should hold parent lock for USB */
+ if (bid->udev->dev.parent)
+ device_lock(bid->udev->dev.parent);
ret = device_attach(&bid->udev->dev);
+ if (bid->udev->dev.parent)
+ device_unlock(bid->udev->dev.parent);
if (ret < 0) {
dev_err(&bid->udev->dev, "rebind failed\n");
return ret;
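
The stub_main.c hunk takes the parent device lock around device_attach(), since USB expects callers to hold it, and only when a parent actually exists. A minimal sketch of that "lock the parent if present" pattern using pthreads; the structure and function names are invented for illustration.

#include <stdio.h>
#include <pthread.h>

struct dev {
	struct dev *parent;
	pthread_mutex_t lock;
};

static int attach(struct dev *d)
{
	/* Lock the parent, if any, for the duration of the attach call. */
	if (d->parent)
		pthread_mutex_lock(&d->parent->lock);

	printf("attaching device\n");	/* stands in for device_attach() */

	if (d->parent)
		pthread_mutex_unlock(&d->parent->lock);
	return 0;
}

int main(void)
{
	struct dev root = { .parent = NULL, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct dev child = { .parent = &root, .lock = PTHREAD_MUTEX_INITIALIZER };

	attach(&child);
	attach(&root);		/* no parent: no locking needed */
	return 0;
}
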
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index f0b955f8504e..109e65ba01a0 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -258,7 +258,7 @@ enum usbip_side {
#define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
#define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
-#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
+#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index f1635662c299..f8f7f3803a99 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -105,10 +105,6 @@ static void event_handler(struct work_struct *work)
unset_event(ud, USBIP_EH_UNUSABLE);
}
- /* Stop the error handler. */
- if (ud->event & USBIP_EH_BYE)
- usbip_dbg_eh("removed %p\n", ud);
-
wake_up(&ud->eh_waitq);
}
}
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 0f98f2c7475f..7efa374a4970 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -117,10 +117,14 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
if (rv != 0)
return -EINVAL;
+ if (!udc) {
+ dev_err(dev, "no device");
+ return -ENODEV;
+ }
spin_lock_irqsave(&udc->lock, flags);
/* Don't export what we don't have */
- if (!udc || !udc->driver || !udc->pullup) {
- dev_err(dev, "no device or gadget not bound");
+ if (!udc->driver || !udc->pullup) {
+ dev_err(dev, "gadget not bound");
ret = -ENODEV;
goto unlock;
}
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 9f1ec4392209..7b8a957b008d 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -810,6 +810,7 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
{
__le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
offset + PCI_EXP_DEVCTL);
+ int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
if (count < 0)
@@ -835,6 +836,27 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
pci_try_reset_function(vdev->pdev);
}
+ /*
+ * MPS is virtualized to the user, writes do not change the physical
+ * register since determining a proper MPS value requires a system wide
+ * device view. The MRRS is largely independent of MPS, but since the
+ * user does not have that system-wide view, they might set a safe, but
+ * inefficiently low value. Here we allow writes through to hardware,
+ * but we set the floor to the physical device MPS setting, so that
+ * we can at least use full TLPs, as defined by the MPS value.
+ *
+ * NB, if any devices actually depend on an artificially low MRRS
+ * setting, this will need to be revisited, perhaps with a quirk
+ * though pcie_set_readrq().
+ */
+ if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
+ readrq = 128 <<
+ ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
+ readrq = max(readrq, pcie_get_mps(vdev->pdev));
+
+ pcie_set_readrq(vdev->pdev, readrq);
+ }
+
return count;
}
@@ -853,11 +875,12 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
* Allow writes to device control fields, except devctl_phantom,
* which could confuse IOMMU, MPS, which can break communication
* with other physical devices, and the ARI bit in devctl2, which
- * is set at probe time. FLR gets virtualized via our writefn.
+ * is set at probe time. FLR and MRRS get virtualized via our
+ * writefn.
*/
p_setw(perm, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD,
- ~PCI_EXP_DEVCTL_PHANTOM);
+ PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
+ PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
return 0;
}
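
The vfio_pci_config.c change lets MRRS writes reach the hardware but floors the value at the device's MPS, decoding the DEVCTL field as 128 << bits[14:12]. A quick arithmetic sketch of that decode-and-clamp step; the register values and MPS below are made-up inputs.

#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ	0x7000	/* bits 14:12, as in pci_regs.h */

static int clamp_readrq(unsigned short devctl, int mps)
{
	int readrq = 128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);

	/* Never program an MRRS below the physical device's MPS. */
	return readrq < mps ? mps : readrq;
}

int main(void)
{
	/* User asks for 128-byte reads, device MPS is 256: clamp to 256. */
	printf("%d\n", clamp_readrq(0x0000, 256));	/* 256 */
	/* User asks for 512-byte reads: already >= MPS, keep 512. */
	printf("%d\n", clamp_readrq(0x2000, 256));	/* 512 */
	return 0;
}
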
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 59b3f62a2d64..70c748a5fbcc 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -195,6 +195,11 @@ static long tce_iommu_register_pages(struct tce_container *container,
return ret;
tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+ if (!tcemem) {
+ mm_iommu_put(container->mm, mem);
+ return -ENOMEM;
+ }
+
tcemem->mem = mem;
list_add(&tcemem->next, &container->prereg_list);
@@ -1332,8 +1337,16 @@ static int tce_iommu_attach_group(void *iommu_data,
if (!table_group->ops || !table_group->ops->take_ownership ||
!table_group->ops->release_ownership) {
+ if (container->v2) {
+ ret = -EPERM;
+ goto unlock_exit;
+ }
ret = tce_iommu_take_ownership(container, table_group);
} else {
+ if (!container->v2) {
+ ret = -EPERM;
+ goto unlock_exit;
+ }
ret = tce_iommu_take_ownership_ddw(container, table_group);
if (!tce_groups_attached(container) && !container->tables[0])
container->def_window_pending = true;
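
The spapr_tce hunk adds the missing kzalloc() failure check and, importantly, releases the memory registration (mm_iommu_put()) acquired just before it, so the error path undoes earlier work in reverse order. A generic sketch of that acquire/allocate/undo shape with placeholder functions:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int resource_get(void)  { printf("get resource\n"); return 0; }
static void resource_put(void) { printf("put resource\n"); }

static int register_pages(int fail_alloc)
{
	void *mem;

	if (resource_get())
		return -EBUSY;

	mem = fail_alloc ? NULL : malloc(64);
	if (!mem) {
		/* Undo the earlier acquisition before bailing out. */
		resource_put();
		return -ENOMEM;
	}

	printf("registered\n");
	free(mem);
	return 0;
}

int main(void)
{
	register_pages(0);	/* get resource, registered         */
	register_pages(1);	/* get resource, put resource, -12  */
	return 0;
}
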
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e5b7652234fc..487586e2d8b9 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -524,7 +524,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
if (!len && vq->busyloop_timeout) {
/* Both tx vq and rx socket were polled here */
- mutex_lock(&vq->mutex);
+ mutex_lock_nested(&vq->mutex, 1);
vhost_disable_notify(&net->dev, vq);
preempt_disable();
@@ -657,7 +657,7 @@ static void handle_rx(struct vhost_net *net)
struct iov_iter fixup;
__virtio16 num_buffers;
- mutex_lock(&vq->mutex);
+ mutex_lock_nested(&vq->mutex, 0);
sock = vq->private_data;
if (!sock)
goto out;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index cd38f5add254..fce49ebc575d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -211,8 +211,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
if (mask & POLLERR) {
- if (poll->wqh)
- remove_wait_queue(poll->wqh, &poll->wait);
+ vhost_poll_stop(poll);
ret = -EINVAL;
}
@@ -1176,14 +1175,14 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
- if (vq->iotlb) {
- /* When device IOTLB was used, the access validation
- * will be validated during prefetching.
- */
+ if (!vq_log_access_ok(vq, vq->log_base))
+ return 0;
+
+ /* Access validation occurs at prefetch time with IOTLB */
+ if (vq->iotlb)
return 1;
- }
- return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
- vq_log_access_ok(vq, vq->log_base);
+
+ return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 288318ad21dd..8049e7656daa 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -134,7 +134,7 @@ static ssize_t bl_power_store(struct device *dev, struct device_attribute *attr,
{
int rc;
struct backlight_device *bd = to_backlight_device(dev);
- unsigned long power;
+ unsigned long power, old_power;
rc = kstrtoul(buf, 0, &power);
if (rc)
@@ -145,10 +145,16 @@ static ssize_t bl_power_store(struct device *dev, struct device_attribute *attr,
if (bd->ops) {
pr_debug("set power to %lu\n", power);
if (bd->props.power != power) {
+ old_power = bd->props.power;
bd->props.power = power;
- backlight_update_status(bd);
+ rc = backlight_update_status(bd);
+ if (rc)
+ bd->props.power = old_power;
+ else
+ rc = count;
+ } else {
+ rc = count;
}
- rc = count;
}
mutex_unlock(&bd->ops_lock);
@@ -176,8 +182,7 @@ int backlight_device_set_brightness(struct backlight_device *bd,
else {
pr_debug("set brightness to %lu\n", brightness);
bd->props.brightness = brightness;
- backlight_update_status(bd);
- rc = 0;
+ rc = backlight_update_status(bd);
}
}
mutex_unlock(&bd->ops_lock);
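
The backlight hunks start honouring the return value of backlight_update_status(); on failure, bl_power_store() now restores the previous power value instead of leaving sysfs claiming a state the hardware never reached. A small sketch of that save/try/roll-back pattern with invented names:

#include <stdio.h>
#include <errno.h>

struct bl { int power; };

/* Pretend the hardware rejects anything other than 0 or 1. */
static int update_status(const struct bl *b)
{
	return (b->power == 0 || b->power == 1) ? 0 : -EINVAL;
}

static int set_power(struct bl *b, int power)
{
	int old = b->power, rc;

	if (b->power == power)
		return 0;

	b->power = power;
	rc = update_status(b);
	if (rc)
		b->power = old;	/* roll back so state matches hardware */
	return rc;
}

int main(void)
{
	struct bl b = { .power = 0 };

	printf("rc=%d power=%d\n", set_power(&b, 1), b.power);	/* rc=0   power=1 */
	printf("rc=%d power=%d\n", set_power(&b, 7), b.power);	/* rc=-22 power=1 */
	return 0;
}
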
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index d7c239ea3d09..f5574060f9c8 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = lcd->buf,
};
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index eab1f842f9c0..e4bd63e9db6b 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi)
spi_message_init(m);
- x->cs_change = 1;
+ x->cs_change = 0;
x->tx_buf = &lcd->buf[0];
spi_message_add_tail(x, m);
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 6a41ea92737a..4dc5ee8debeb 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = buf,
};
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 11576611a974..dda1c4b3a229 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -405,7 +405,10 @@ static const char *vgacon_startup(void)
vga_video_port_val = VGA_CRT_DM;
if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) {
static struct resource ega_console_resource =
- { .name = "ega", .start = 0x3B0, .end = 0x3BF };
+ { .name = "ega",
+ .flags = IORESOURCE_IO,
+ .start = 0x3B0,
+ .end = 0x3BF };
vga_video_type = VIDEO_TYPE_EGAM;
vga_vram_size = 0x8000;
display_desc = "EGA+";
@@ -413,9 +416,15 @@ static const char *vgacon_startup(void)
&ega_console_resource);
} else {
static struct resource mda1_console_resource =
- { .name = "mda", .start = 0x3B0, .end = 0x3BB };
+ { .name = "mda",
+ .flags = IORESOURCE_IO,
+ .start = 0x3B0,
+ .end = 0x3BB };
static struct resource mda2_console_resource =
- { .name = "mda", .start = 0x3BF, .end = 0x3BF };
+ { .name = "mda",
+ .flags = IORESOURCE_IO,
+ .start = 0x3BF,
+ .end = 0x3BF };
vga_video_type = VIDEO_TYPE_MDA;
vga_vram_size = 0x2000;
display_desc = "*MDA";
@@ -437,15 +446,21 @@ static const char *vgacon_startup(void)
vga_vram_size = 0x8000;
if (!screen_info.orig_video_isVGA) {
- static struct resource ega_console_resource
- = { .name = "ega", .start = 0x3C0, .end = 0x3DF };
+ static struct resource ega_console_resource =
+ { .name = "ega",
+ .flags = IORESOURCE_IO,
+ .start = 0x3C0,
+ .end = 0x3DF };
vga_video_type = VIDEO_TYPE_EGAC;
display_desc = "EGA";
request_resource(&ioport_resource,
&ega_console_resource);
} else {
- static struct resource vga_console_resource
- = { .name = "vga+", .start = 0x3C0, .end = 0x3DF };
+ static struct resource vga_console_resource =
+ { .name = "vga+",
+ .flags = IORESOURCE_IO,
+ .start = 0x3C0,
+ .end = 0x3DF };
vga_video_type = VIDEO_TYPE_VGAC;
display_desc = "VGA+";
request_resource(&ioport_resource,
@@ -489,7 +504,10 @@ static const char *vgacon_startup(void)
}
} else {
static struct resource cga_console_resource =
- { .name = "cga", .start = 0x3D4, .end = 0x3D5 };
+ { .name = "cga",
+ .flags = IORESOURCE_IO,
+ .start = 0x3D4,
+ .end = 0x3D5 };
vga_video_type = VIDEO_TYPE_CGA;
vga_vram_size = 0x2000;
display_desc = "*CGA";
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index ec2671d98abc..89880b70cc28 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -892,8 +892,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb)
if (err)
return err;
- framesize = fb->panel->mode.xres * fb->panel->mode.yres *
- fb->panel->bpp / 8;
+ framesize = PAGE_ALIGN(fb->panel->mode.xres * fb->panel->mode.yres *
+ fb->panel->bpp / 8);
fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize,
&dma, GFP_KERNEL);
if (!fb->fb.screen_base)
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
index b529a8c2b652..3984716096aa 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
@@ -455,6 +455,8 @@ static int td028ttec1_panel_remove(struct spi_device *spi)
}
static const struct of_device_id td028ttec1_of_match[] = {
+ { .compatible = "omapdss,tpo,td028ttec1", },
+ /* keep to not break older DTB */
{ .compatible = "omapdss,toppoly,td028ttec1", },
{},
};
@@ -474,6 +476,7 @@ static struct spi_driver td028ttec1_spi_driver = {
module_spi_driver(td028ttec1_spi_driver);
+MODULE_ALIAS("spi:tpo,td028ttec1");
MODULE_ALIAS("spi:toppoly,td028ttec1");
MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index 47d7f69ad9ad..48c6500c24e1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -941,11 +941,13 @@ static int dss_init_features(struct platform_device *pdev)
return 0;
}
+static void dss_uninit_ports(struct platform_device *pdev);
+
static int dss_init_ports(struct platform_device *pdev)
{
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
- int r;
+ int r, ret = 0;
if (parent == NULL)
return 0;
@@ -972,17 +974,21 @@ static int dss_init_ports(struct platform_device *pdev)
switch (port_type) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_init_port(pdev, port);
+ ret = dpi_init_port(pdev, port);
break;
case OMAP_DISPLAY_TYPE_SDI:
- sdi_init_port(pdev, port);
+ ret = sdi_init_port(pdev, port);
break;
default:
break;
}
- } while ((port = omapdss_of_get_next_port(parent, port)) != NULL);
+ } while (!ret &&
+ (port = omapdss_of_get_next_port(parent, port)) != NULL);
- return 0;
+ if (ret)
+ dss_uninit_ports(pdev);
+
+ return ret;
}
static void dss_uninit_ports(struct platform_device *pdev)
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index d0a4e2f79a57..d215faacce04 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1600,6 +1600,7 @@ static int sm501fb_start(struct sm501fb_info *info,
info->fbmem = ioremap(res->start, resource_size(res));
if (info->fbmem == NULL) {
dev_err(dev, "cannot remap framebuffer\n");
+ ret = -ENXIO;
goto err_mem_res;
}
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 53326badfb61..2add8def83be 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1487,15 +1487,25 @@ static struct device_attribute fb_device_attrs[] = {
static int dlfb_select_std_channel(struct dlfb_data *dev)
{
int ret;
- u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7,
+ void *buf;
+ static const u8 set_def_chn[] = {
+ 0x57, 0xCD, 0xDC, 0xA7,
0x1C, 0x88, 0x5E, 0x15,
0x60, 0xFE, 0xC6, 0x97,
0x16, 0x3D, 0x47, 0xF2 };
+ buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
NR_USB_REQUEST_CHANNEL,
(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
- set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
+ buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
+
+ kfree(buf);
+
return ret;
}
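
The udlfb hunk stops handing an on-stack array to usb_control_msg() and instead duplicates the constant table into a heap buffer with kmemdup(), because USB transfer buffers must be DMA-able (kmalloc'd), not stack or rodata. Below is a userspace analogue of that duplicate-then-send shape; send_ctrl() is a stand-in, not a real API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Stand-in for usb_control_msg(): pretend it requires a heap buffer. */
static int send_ctrl(void *buf, size_t len)
{
	printf("sending %zu bytes from %p\n", len, buf);
	return (int)len;
}

static int select_channel(void)
{
	static const unsigned char set_def_chn[] = {
		0x57, 0xCD, 0xDC, 0xA7, 0x1C, 0x88, 0x5E, 0x15,
		0x60, 0xFE, 0xC6, 0x97, 0x16, 0x3D, 0x47, 0xF2 };
	void *buf;
	int ret;

	/* Userspace analogue of kmemdup(): copy the rodata table to the heap. */
	buf = malloc(sizeof(set_def_chn));
	if (!buf)
		return -ENOMEM;
	memcpy(buf, set_def_chn, sizeof(set_def_chn));

	ret = send_ctrl(buf, sizeof(set_def_chn));
	free(buf);
	return ret;
}

int main(void)
{
	return select_channel() < 0;
}
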
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index da653a080394..54127905bfe7 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -239,8 +239,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
*/
static int vfb_set_par(struct fb_info *info)
{
+ switch (info->var.bits_per_pixel) {
+ case 1:
+ info->fix.visual = FB_VISUAL_MONO01;
+ break;
+ case 8:
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ case 16:
+ case 24:
+ case 32:
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ break;
+ }
+
info->fix.line_length = get_line_length(info->var.xres_virtual,
info->var.bits_per_pixel);
+
return 0;
}
@@ -450,6 +465,8 @@ static int vfb_probe(struct platform_device *dev)
goto err2;
platform_set_drvdata(dev, info);
+ vfb_set_par(info);
+
fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
videomemorysize >> 10);
return 0;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 162689227a23..b73520aaf697 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -321,6 +321,17 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
+static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *frame)
+{
+ /* for side by side (half) we also need to provide 3D_Ext_Data */
+ if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ return 6;
+ else if (frame->vic != 0 || frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
+ return 5;
+ else
+ return 4;
+}
+
/**
* hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
* @frame: HDMI infoframe
@@ -341,19 +352,11 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
u8 *ptr = buffer;
size_t length;
- /* empty info frame */
- if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
- return -EINVAL;
-
/* only one of those can be supplied */
if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
return -EINVAL;
- /* for side by side (half) we also need to provide 3D_Ext_Data */
- if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
- frame->length = 6;
- else
- frame->length = 5;
+ frame->length = hdmi_vendor_infoframe_length(frame);
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
@@ -372,14 +375,16 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
ptr[5] = 0x0c;
ptr[6] = 0x00;
- if (frame->vic) {
- ptr[7] = 0x1 << 5; /* video format */
- ptr[8] = frame->vic;
- } else {
+ if (frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) {
ptr[7] = 0x2 << 5; /* video format */
ptr[8] = (frame->s3d_struct & 0xf) << 4;
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
+ } else if (frame->vic) {
+ ptr[7] = 0x1 << 5; /* video format */
+ ptr[8] = frame->vic;
+ } else {
+ ptr[7] = 0x0 << 5; /* video format */
}
hdmi_infoframe_set_checksum(buffer, length);
@@ -1161,7 +1166,7 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
ptr[1] != 1 ||
- (ptr[2] != 5 && ptr[2] != 6))
+ (ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6))
return -EINVAL;
length = ptr[2];
@@ -1189,16 +1194,22 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
hvf->length = length;
- if (hdmi_video_format == 0x1) {
- hvf->vic = ptr[4];
- } else if (hdmi_video_format == 0x2) {
+ if (hdmi_video_format == 0x2) {
+ if (length != 5 && length != 6)
+ return -EINVAL;
hvf->s3d_struct = ptr[4] >> 4;
if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) {
- if (length == 6)
- hvf->s3d_ext_data = ptr[5] >> 4;
- else
+ if (length != 6)
return -EINVAL;
+ hvf->s3d_ext_data = ptr[5] >> 4;
}
+ } else if (hdmi_video_format == 0x1) {
+ if (length != 5)
+ return -EINVAL;
+ hvf->vic = ptr[4];
+ } else {
+ if (length != 4)
+ return -EINVAL;
}
return 0;
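
The hdmi.c changes teach both pack and unpack that a vendor infoframe can legitimately be 4, 5 or 6 bytes long, depending on whether it carries no video format, a VIC, or 3D data with extension. Here is a standalone version of the new length rule, mirroring hdmi_vendor_infoframe_length() with simplified stand-in constants:

#include <stdio.h>

/* Simplified stand-ins for the kernel's enum hdmi_3d_structure values. */
#define S3D_INVALID		(-1)
#define S3D_SIDE_BY_SIDE_HALF	8

static int vendor_infoframe_length(int s3d_struct, int vic)
{
	if (s3d_struct >= S3D_SIDE_BY_SIDE_HALF)
		return 6;	/* 3D format plus 3D_Ext_Data          */
	else if (vic != 0 || s3d_struct != S3D_INVALID)
		return 5;	/* either a VIC or a simple 3D format  */
	else
		return 4;	/* "empty" frame is now valid, too     */
}

int main(void)
{
	printf("%d\n", vendor_infoframe_length(S3D_INVALID, 0));	    /* 4 */
	printf("%d\n", vendor_infoframe_length(S3D_INVALID, 4));	    /* 5 */
	printf("%d\n", vendor_infoframe_length(S3D_SIDE_BY_SIDE_HALF, 0)); /* 6 */
	return 0;
}
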
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 489bfc61cf30..8977f40ea441 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -423,8 +423,6 @@ unmap_release:
i = vq->vring.desc[i].next;
}
- vq->vq.num_free += total_sg;
-
if (indirect)
kfree(desc);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 3eb58cb51e56..8f8909a668d7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -799,11 +799,12 @@ config EBC_C384_WDT
the timeout module parameter.
config F71808E_WDT
- tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog"
+ tristate "Fintek F718xx, F818xx Super I/O Watchdog"
depends on X86
help
- This is the driver for the hardware watchdog on the Fintek
- F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers.
+ This is the driver for the hardware watchdog on the Fintek F71808E,
+ F71862FG, F71868, F71869, F71882FG, F71889FG, F81865 and F81866
+ Super I/O controllers.
You can compile this driver directly into the kernel, or use
it as a module. The module will be called f71808e_wdt.
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 1b7e9169072f..e682bf046e50 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -57,6 +57,7 @@
#define SIO_F71808_ID 0x0901 /* Chipset ID */
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
+#define SIO_F71868_ID 0x1106 /* Chipset ID */
#define SIO_F71869_ID 0x0814 /* Chipset ID */
#define SIO_F71869A_ID 0x1007 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
@@ -101,7 +102,7 @@ MODULE_PARM_DESC(timeout,
static unsigned int pulse_width = WATCHDOG_PULSE_WIDTH;
module_param(pulse_width, uint, 0);
MODULE_PARM_DESC(pulse_width,
- "Watchdog signal pulse width. 0(=level), 1 ms, 25 ms, 125 ms or 5000 ms"
+ "Watchdog signal pulse width. 0(=level), 1, 25, 30, 125, 150, 5000 or 6000 ms"
" (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")");
static unsigned int f71862fg_pin = WATCHDOG_F71862FG_PIN;
@@ -119,13 +120,14 @@ module_param(start_withtimeout, uint, 0);
MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
" given initial timeout. Zero (default) disables this feature.");
-enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865,
- f81866};
+enum chips { f71808fg, f71858fg, f71862fg, f71868, f71869, f71882fg, f71889fg,
+ f81865, f81866};
static const char *f71808e_names[] = {
"f71808fg",
"f71858fg",
"f71862fg",
+ "f71868",
"f71869",
"f71882fg",
"f71889fg",
@@ -252,16 +254,23 @@ static int watchdog_set_timeout(int timeout)
static int watchdog_set_pulse_width(unsigned int pw)
{
int err = 0;
+ unsigned int t1 = 25, t2 = 125, t3 = 5000;
+
+ if (watchdog.type == f71868) {
+ t1 = 30;
+ t2 = 150;
+ t3 = 6000;
+ }
mutex_lock(&watchdog.lock);
- if (pw <= 1) {
+ if (pw <= 1) {
watchdog.pulse_val = 0;
- } else if (pw <= 25) {
+ } else if (pw <= t1) {
watchdog.pulse_val = 1;
- } else if (pw <= 125) {
+ } else if (pw <= t2) {
watchdog.pulse_val = 2;
- } else if (pw <= 5000) {
+ } else if (pw <= t3) {
watchdog.pulse_val = 3;
} else {
pr_err("pulse width out of range\n");
@@ -354,6 +363,7 @@ static int watchdog_start(void)
goto exit_superio;
break;
+ case f71868:
case f71869:
/* GPIO14 --> WDTRST# */
superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 4);
@@ -486,7 +496,7 @@ static bool watchdog_is_running(void)
is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
&& (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
- & F71808FG_FLAG_WD_EN);
+ & BIT(F71808FG_FLAG_WD_EN));
superio_exit(watchdog.sioaddr);
@@ -792,6 +802,9 @@ static int __init f71808e_find(int sioaddr)
watchdog.type = f71862fg;
err = f71862fg_pin_configure(0); /* validate module parameter */
break;
+ case SIO_F71868_ID:
+ watchdog.type = f71868;
+ break;
case SIO_F71869_ID:
case SIO_F71869A_ID:
watchdog.type = f71869;
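Sketch (userspace, hypothetical names) of the pulse-width mapping the hunk above introduces: the f71868 uses 30/150/6000 ms breakpoints where the other chips use 25/125/5000 ms, and the same 2-bit register value is picked by comparing against whichever set applies.

#include <stdio.h>

enum chip { CHIP_F71808FG, CHIP_F71868 };

/* Map a requested pulse width (ms) to the 2-bit register value the driver
 * programs; 0 means level-triggered. Returns -1 if out of range. */
static int pulse_val(enum chip type, unsigned int pw_ms)
{
        unsigned int t1 = 25, t2 = 125, t3 = 5000;

        if (type == CHIP_F71868) {      /* the F71868 uses longer steps */
                t1 = 30;
                t2 = 150;
                t3 = 6000;
        }

        if (pw_ms <= 1)
                return 0;
        if (pw_ms <= t1)
                return 1;
        if (pw_ms <= t2)
                return 2;
        if (pw_ms <= t3)
                return 3;
        return -1;              /* pulse width out of range */
}

int main(void)
{
        printf("%d %d\n", pulse_val(CHIP_F71808FG, 100), pulse_val(CHIP_F71868, 140));
        return 0;
}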
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 70c7194e2810..b0a158073abd 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -28,16 +28,7 @@
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
-#ifdef CONFIG_HPWDT_NMI_DECODING
-#include <linux/dmi.h>
-#include <linux/spinlock.h>
-#include <linux/nmi.h>
-#include <linux/kdebug.h>
-#include <linux/notifier.h>
-#include <asm/cacheflush.h>
-#endif /* CONFIG_HPWDT_NMI_DECODING */
#include <asm/nmi.h>
-#include <asm/frame.h>
#define HPWDT_VERSION "1.4.0"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
@@ -48,10 +39,14 @@
static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
static unsigned int reload; /* the computed soft_margin */
static bool nowayout = WATCHDOG_NOWAYOUT;
+#ifdef CONFIG_HPWDT_NMI_DECODING
+static unsigned int allow_kdump = 1;
+#endif
static char expect_release;
static unsigned long hpwdt_is_open;
static void __iomem *pci_mem_addr; /* the PCI-memory address */
+static unsigned long __iomem *hpwdt_nmistat;
static unsigned long __iomem *hpwdt_timer_reg;
static unsigned long __iomem *hpwdt_timer_con;
@@ -62,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = {
};
MODULE_DEVICE_TABLE(pci, hpwdt_devices);
-#ifdef CONFIG_HPWDT_NMI_DECODING
-#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
-#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
-#define PCI_BIOS32_PARAGRAPH_LEN 16
-#define PCI_ROM_BASE1 0x000F0000
-#define ROM_SIZE 0x10000
-
-struct bios32_service_dir {
- u32 signature;
- u32 entry_point;
- u8 revision;
- u8 length;
- u8 checksum;
- u8 reserved[5];
-};
-
-/* type 212 */
-struct smbios_cru64_info {
- u8 type;
- u8 byte_length;
- u16 handle;
- u32 signature;
- u64 physical_address;
- u32 double_length;
- u32 double_offset;
-};
-#define SMBIOS_CRU64_INFORMATION 212
-
-/* type 219 */
-struct smbios_proliant_info {
- u8 type;
- u8 byte_length;
- u16 handle;
- u32 power_features;
- u32 omega_features;
- u32 reserved;
- u32 misc_features;
-};
-#define SMBIOS_ICRU_INFORMATION 219
-
-
-struct cmn_registers {
- union {
- struct {
- u8 ral;
- u8 rah;
- u16 rea2;
- };
- u32 reax;
- } u1;
- union {
- struct {
- u8 rbl;
- u8 rbh;
- u8 reb2l;
- u8 reb2h;
- };
- u32 rebx;
- } u2;
- union {
- struct {
- u8 rcl;
- u8 rch;
- u16 rec2;
- };
- u32 recx;
- } u3;
- union {
- struct {
- u8 rdl;
- u8 rdh;
- u16 red2;
- };
- u32 redx;
- } u4;
-
- u32 resi;
- u32 redi;
- u16 rds;
- u16 res;
- u32 reflags;
-} __attribute__((packed));
-
-static unsigned int hpwdt_nmi_decoding;
-static unsigned int allow_kdump = 1;
-static unsigned int is_icru;
-static unsigned int is_uefi;
-static DEFINE_SPINLOCK(rom_lock);
-static void *cru_rom_addr;
-static struct cmn_registers cmn_regs;
-
-extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
- unsigned long *pRomEntry);
-
-#ifdef CONFIG_X86_32
-/* --32 Bit Bios------------------------------------------------------------ */
-
-#define HPWDT_ARCH 32
-
-asm(".text \n\t"
- ".align 4 \n\t"
- ".globl asminline_call \n"
- "asminline_call: \n\t"
- "pushl %ebp \n\t"
- "movl %esp, %ebp \n\t"
- "pusha \n\t"
- "pushf \n\t"
- "push %es \n\t"
- "push %ds \n\t"
- "pop %es \n\t"
- "movl 8(%ebp),%eax \n\t"
- "movl 4(%eax),%ebx \n\t"
- "movl 8(%eax),%ecx \n\t"
- "movl 12(%eax),%edx \n\t"
- "movl 16(%eax),%esi \n\t"
- "movl 20(%eax),%edi \n\t"
- "movl (%eax),%eax \n\t"
- "push %cs \n\t"
- "call *12(%ebp) \n\t"
- "pushf \n\t"
- "pushl %eax \n\t"
- "movl 8(%ebp),%eax \n\t"
- "movl %ebx,4(%eax) \n\t"
- "movl %ecx,8(%eax) \n\t"
- "movl %edx,12(%eax) \n\t"
- "movl %esi,16(%eax) \n\t"
- "movl %edi,20(%eax) \n\t"
- "movw %ds,24(%eax) \n\t"
- "movw %es,26(%eax) \n\t"
- "popl %ebx \n\t"
- "movl %ebx,(%eax) \n\t"
- "popl %ebx \n\t"
- "movl %ebx,28(%eax) \n\t"
- "pop %es \n\t"
- "popf \n\t"
- "popa \n\t"
- "leave \n\t"
- "ret \n\t"
- ".previous");
-
-
-/*
- * cru_detect
- *
- * Routine Description:
- * This function uses the 32-bit BIOS Service Directory record to
- * search for a $CRU record.
- *
- * Return Value:
- * 0 : SUCCESS
- * <0 : FAILURE
- */
-static int cru_detect(unsigned long map_entry,
- unsigned long map_offset)
-{
- void *bios32_map;
- unsigned long *bios32_entrypoint;
- unsigned long cru_physical_address;
- unsigned long cru_length;
- unsigned long physical_bios_base = 0;
- unsigned long physical_bios_offset = 0;
- int retval = -ENODEV;
-
- bios32_map = ioremap(map_entry, (2 * PAGE_SIZE));
-
- if (bios32_map == NULL)
- return -ENODEV;
-
- bios32_entrypoint = bios32_map + map_offset;
-
- cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
-
- set_memory_x((unsigned long)bios32_map, 2);
- asminline_call(&cmn_regs, bios32_entrypoint);
-
- if (cmn_regs.u1.ral != 0) {
- pr_warn("Call succeeded but with an error: 0x%x\n",
- cmn_regs.u1.ral);
- } else {
- physical_bios_base = cmn_regs.u2.rebx;
- physical_bios_offset = cmn_regs.u4.redx;
- cru_length = cmn_regs.u3.recx;
- cru_physical_address =
- physical_bios_base + physical_bios_offset;
-
- /* If the values look OK, then map it in. */
- if ((physical_bios_base + physical_bios_offset)) {
- cru_rom_addr =
- ioremap(cru_physical_address, cru_length);
- if (cru_rom_addr) {
- set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
- (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
- retval = 0;
- }
- }
-
- pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base);
- pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset);
- pr_debug("CRU Length: 0x%lx\n", cru_length);
- pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr);
- }
- iounmap(bios32_map);
- return retval;
-}
-
-/*
- * bios_checksum
- */
-static int bios_checksum(const char __iomem *ptr, int len)
-{
- char sum = 0;
- int i;
-
- /*
- * calculate checksum of size bytes. This should add up
- * to zero if we have a valid header.
- */
- for (i = 0; i < len; i++)
- sum += ptr[i];
-
- return ((sum == 0) && (len > 0));
-}
-
-/*
- * bios32_present
- *
- * Routine Description:
- * This function finds the 32-bit BIOS Service Directory
- *
- * Return Value:
- * 0 : SUCCESS
- * <0 : FAILURE
- */
-static int bios32_present(const char __iomem *p)
-{
- struct bios32_service_dir *bios_32_ptr;
- int length;
- unsigned long map_entry, map_offset;
-
- bios_32_ptr = (struct bios32_service_dir *) p;
-
- /*
- * Search for signature by checking equal to the swizzled value
- * instead of calling another routine to perform a strcmp.
- */
- if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) {
- length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN;
- if (bios_checksum(p, length)) {
- /*
- * According to the spec, we're looking for the
- * first 4KB-aligned address below the entrypoint
- * listed in the header. The Service Directory code
- * is guaranteed to occupy no more than 2 4KB pages.
- */
- map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1);
- map_offset = bios_32_ptr->entry_point - map_entry;
-
- return cru_detect(map_entry, map_offset);
- }
- }
- return -ENODEV;
-}
-
-static int detect_cru_service(void)
-{
- char __iomem *p, *q;
- int rc = -1;
-
- /*
- * Search from 0x0f0000 through 0x0fffff, inclusive.
- */
- p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
- if (p == NULL)
- return -ENOMEM;
-
- for (q = p; q < p + ROM_SIZE; q += 16) {
- rc = bios32_present(q);
- if (!rc)
- break;
- }
- iounmap(p);
- return rc;
-}
-/* ------------------------------------------------------------------------- */
-#endif /* CONFIG_X86_32 */
-#ifdef CONFIG_X86_64
-/* --64 Bit Bios------------------------------------------------------------ */
-
-#define HPWDT_ARCH 64
-
-asm(".text \n\t"
- ".align 4 \n\t"
- ".globl asminline_call \n\t"
- ".type asminline_call, @function \n\t"
- "asminline_call: \n\t"
- FRAME_BEGIN
- "pushq %rax \n\t"
- "pushq %rbx \n\t"
- "pushq %rdx \n\t"
- "pushq %r12 \n\t"
- "pushq %r9 \n\t"
- "movq %rsi, %r12 \n\t"
- "movq %rdi, %r9 \n\t"
- "movl 4(%r9),%ebx \n\t"
- "movl 8(%r9),%ecx \n\t"
- "movl 12(%r9),%edx \n\t"
- "movl 16(%r9),%esi \n\t"
- "movl 20(%r9),%edi \n\t"
- "movl (%r9),%eax \n\t"
- "call *%r12 \n\t"
- "pushfq \n\t"
- "popq %r12 \n\t"
- "movl %eax, (%r9) \n\t"
- "movl %ebx, 4(%r9) \n\t"
- "movl %ecx, 8(%r9) \n\t"
- "movl %edx, 12(%r9) \n\t"
- "movl %esi, 16(%r9) \n\t"
- "movl %edi, 20(%r9) \n\t"
- "movq %r12, %rax \n\t"
- "movl %eax, 28(%r9) \n\t"
- "popq %r9 \n\t"
- "popq %r12 \n\t"
- "popq %rdx \n\t"
- "popq %rbx \n\t"
- "popq %rax \n\t"
- FRAME_END
- "ret \n\t"
- ".previous");
-
-/*
- * dmi_find_cru
- *
- * Routine Description:
- * This function checks whether or not a SMBIOS/DMI record is
- * the 64bit CRU info or not
- */
-static void dmi_find_cru(const struct dmi_header *dm, void *dummy)
-{
- struct smbios_cru64_info *smbios_cru64_ptr;
- unsigned long cru_physical_address;
-
- if (dm->type == SMBIOS_CRU64_INFORMATION) {
- smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
- if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
- cru_physical_address =
- smbios_cru64_ptr->physical_address +
- smbios_cru64_ptr->double_offset;
- cru_rom_addr = ioremap(cru_physical_address,
- smbios_cru64_ptr->double_length);
- set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
- smbios_cru64_ptr->double_length >> PAGE_SHIFT);
- }
- }
-}
-
-static int detect_cru_service(void)
-{
- cru_rom_addr = NULL;
-
- dmi_walk(dmi_find_cru, NULL);
-
- /* if cru_rom_addr has been set then we found a CRU service */
- return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
-}
-/* ------------------------------------------------------------------------- */
-#endif /* CONFIG_X86_64 */
-#endif /* CONFIG_HPWDT_NMI_DECODING */
/*
* Watchdog operations
@@ -475,32 +103,22 @@ static int hpwdt_time_left(void)
}
#ifdef CONFIG_HPWDT_NMI_DECODING
+static int hpwdt_my_nmi(void)
+{
+ return ioread8(hpwdt_nmistat) & 0x6;
+}
+
/*
* NMI Handler
*/
static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
{
- unsigned long rom_pl;
- static int die_nmi_called;
-
- if (!hpwdt_nmi_decoding)
+ if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
return NMI_DONE;
- spin_lock_irqsave(&rom_lock, rom_pl);
- if (!die_nmi_called && !is_icru && !is_uefi)
- asminline_call(&cmn_regs, cru_rom_addr);
- die_nmi_called = 1;
- spin_unlock_irqrestore(&rom_lock, rom_pl);
-
if (allow_kdump)
hpwdt_stop();
- if (!is_icru && !is_uefi) {
- if (cmn_regs.u1.ral == 0) {
- nmi_panic(regs, "An NMI occurred, but unable to determine source.\n");
- return NMI_HANDLED;
- }
- }
nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
"for the NMI is logged in any one of the following "
"resources:\n"
@@ -666,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = {
* Init & Exit
*/
-#ifdef CONFIG_HPWDT_NMI_DECODING
-#ifdef CONFIG_X86_LOCAL_APIC
-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
-{
- /*
- * If nmi_watchdog is turned off then we can turn on
- * our nmi decoding capability.
- */
- hpwdt_nmi_decoding = 1;
-}
-#else
-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
-{
- dev_warn(&dev->dev, "NMI decoding is disabled. "
- "Your kernel does not support a NMI Watchdog.\n");
-}
-#endif /* CONFIG_X86_LOCAL_APIC */
-
-/*
- * dmi_find_icru
- *
- * Routine Description:
- * This function checks whether or not we are on an iCRU-based server.
- * This check is independent of architecture and needs to be made for
- * any ProLiant system.
- */
-static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
-{
- struct smbios_proliant_info *smbios_proliant_ptr;
-
- if (dm->type == SMBIOS_ICRU_INFORMATION) {
- smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
- if (smbios_proliant_ptr->misc_features & 0x01)
- is_icru = 1;
- if (smbios_proliant_ptr->misc_features & 0x408)
- is_uefi = 1;
- }
-}
static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
{
+#ifdef CONFIG_HPWDT_NMI_DECODING
int retval;
-
- /*
- * On typical CRU-based systems we need to map that service in
- * the BIOS. For 32 bit Operating Systems we need to go through
- * the 32 Bit BIOS Service Directory. For 64 bit Operating
- * Systems we get that service through SMBIOS.
- *
- * On systems that support the new iCRU service all we need to
- * do is call dmi_walk to get the supported flag value and skip
- * the old cru detect code.
- */
- dmi_walk(dmi_find_icru, NULL);
- if (!is_icru && !is_uefi) {
-
- /*
- * We need to map the ROM to get the CRU service.
- * For 32 bit Operating Systems we need to go through the 32 Bit
- * BIOS Service Directory
- * For 64 bit Operating Systems we get that service through SMBIOS.
- */
- retval = detect_cru_service();
- if (retval < 0) {
- dev_warn(&dev->dev,
- "Unable to detect the %d Bit CRU Service.\n",
- HPWDT_ARCH);
- return retval;
- }
-
- /*
- * We know this is the only CRU call we need to make so lets keep as
- * few instructions as possible once the NMI comes in.
- */
- cmn_regs.u1.rah = 0x0D;
- cmn_regs.u1.ral = 0x02;
- }
-
/*
* Only one function can register for NMI_UNKNOWN
*/
@@ -771,45 +316,26 @@ error:
dev_warn(&dev->dev,
"Unable to register a die notifier (err=%d).\n",
retval);
- if (cru_rom_addr)
- iounmap(cru_rom_addr);
return retval;
+#endif /* CONFIG_HPWDT_NMI_DECODING */
+ return 0;
}
static void hpwdt_exit_nmi_decoding(void)
{
+#ifdef CONFIG_HPWDT_NMI_DECODING
unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
unregister_nmi_handler(NMI_SERR, "hpwdt");
unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
- if (cru_rom_addr)
- iounmap(cru_rom_addr);
-}
-#else /* !CONFIG_HPWDT_NMI_DECODING */
-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
-{
-}
-
-static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
-{
- return 0;
+#endif
}
-static void hpwdt_exit_nmi_decoding(void)
-{
-}
-#endif /* CONFIG_HPWDT_NMI_DECODING */
-
static int hpwdt_init_one(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int retval;
/*
- * Check if we can do NMI decoding or not
- */
- hpwdt_check_nmi_decoding(dev);
-
- /*
* First let's find out if we are on an iLO2+ server. We will
* not run on a legacy ASM box.
* So we only support the G5 ProLiant servers and higher.
@@ -842,6 +368,7 @@ static int hpwdt_init_one(struct pci_dev *dev,
retval = -ENOMEM;
goto error_pci_iomap;
}
+ hpwdt_nmistat = pci_mem_addr + 0x6e;
hpwdt_timer_reg = pci_mem_addr + 0x70;
hpwdt_timer_con = pci_mem_addr + 0x72;
@@ -912,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
#ifdef CONFIG_HPWDT_NMI_DECODING
module_param(allow_kdump, int, 0);
MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
-#endif /* !CONFIG_HPWDT_NMI_DECODING */
+#endif /* CONFIG_HPWDT_NMI_DECODING */
module_pci_driver(hpwdt_driver);
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 32930a073a12..977fe74e5abe 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -760,6 +760,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
{
struct watchdog_core_data *wd_data;
struct watchdog_device *wdd;
+ bool hw_running;
int err;
/* Get the corresponding watchdog device */
@@ -779,7 +780,8 @@ static int watchdog_open(struct inode *inode, struct file *file)
* If the /dev/watchdog device is open, we don't want the module
* to be unloaded.
*/
- if (!watchdog_hw_running(wdd) && !try_module_get(wdd->ops->owner)) {
+ hw_running = watchdog_hw_running(wdd);
+ if (!hw_running && !try_module_get(wdd->ops->owner)) {
err = -EBUSY;
goto out_clear;
}
@@ -790,7 +792,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
file->private_data = wd_data;
- if (!watchdog_hw_running(wdd))
+ if (!hw_running)
kref_get(&wd_data->kref);
/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
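The open() fix above amounts to reading the hardware-running state once and reusing the cached value for both the module reference and the kref, so the two stay balanced even if the state flips in between; a minimal stand-alone sketch of that read-once pattern (take_ref()/take_kref() are stand-ins, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

static volatile bool hw_running_flag;   /* may be flipped by another context */

static bool take_ref(void) { return true; }     /* stand-in for try_module_get() */
static void take_kref(void) { }                 /* stand-in for kref_get() */

/* Read the flag once and use the cached value for both decisions, so the
 * module reference and the kref stay paired even if the flag changes
 * between the two tests. */
static int open_device(void)
{
        bool hw_running = hw_running_flag;

        if (!hw_running && !take_ref())
                return -1;

        if (!hw_running)
                take_kref();

        return 0;
}

int main(void)
{
        hw_running_flag = false;
        printf("%d\n", open_device());
        return 0;
}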
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 6d1fbda0f461..0da9943d405f 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
memset(&r, 0, sizeof(r));
r.start = gas->address;
- r.end = r.start + gas->access_width;
+ r.end = r.start + gas->access_width - 1;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
r.flags = IORESOURCE_MEM;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
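The wdat_wdt change is a plain off-by-one: struct resource ends are inclusive, so an access_width-byte window starting at gas->address must end at address + access_width - 1. A trivial sketch of the arithmetic:

#include <stdio.h>

/* Inclusive end of a byte region: a width-byte window starting at start
 * covers [start, start + width - 1]; using start + width would claim one
 * extra byte (the bug the patch removes). */
static unsigned long region_end(unsigned long start, unsigned long width)
{
        return start + width - 1;
}

int main(void)
{
        /* A 4-byte register window at 0x1000 ends at 0x1003, not 0x1004. */
        printf("0x%lx\n", region_end(0x1000, 4));
        return 0;
}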
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 79b8ab4c6663..910b5d40c6e9 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -378,10 +378,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
range = 0;
while (range < pages) {
- if (map->unmap_ops[offset+range].handle == -1) {
- range--;
+ if (map->unmap_ops[offset+range].handle == -1)
break;
- }
range++;
}
err = __unmap_grant_pages(map, offset, range);
@@ -1079,8 +1077,10 @@ unlock_out:
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
- if (use_ptemod)
+ if (use_ptemod) {
map->vma = NULL;
+ unmap_grant_pages(map, 0, map->count);
+ }
gntdev_put_map(priv, map);
return err;
}
diff --git a/fs/aio.c b/fs/aio.c
index 211ebc21e4db..9ca471f4db66 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -69,9 +69,9 @@ struct aio_ring {
#define AIO_RING_PAGES 8
struct kioctx_table {
- struct rcu_head rcu;
- unsigned nr;
- struct kioctx *table[];
+ struct rcu_head rcu;
+ unsigned nr;
+ struct kioctx __rcu *table[];
};
struct kioctx_cpu {
@@ -116,7 +116,9 @@ struct kioctx {
struct page **ring_pages;
long nr_pages;
- struct swork_event free_work;
+ struct rcu_head free_rcu;
+ struct work_struct free_work; /* see free_ioctx() */
+ struct swork_event free_swork; /* see free_ioctx_users() */
/*
* signals when all in-flight requests are done
@@ -331,7 +333,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
for (i = 0; i < table->nr; i++) {
struct kioctx *ctx;
- ctx = table->table[i];
+ ctx = rcu_dereference(table->table[i]);
if (ctx && ctx->aio_ring_file == file) {
if (!atomic_read(&ctx->dead)) {
ctx->user_id = ctx->mmap_base = vma->vm_start;
@@ -583,9 +585,15 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
return cancel(&kiocb->common);
}
-static void free_ioctx(struct swork_event *sev)
+/*
+ * free_ioctx() should be RCU delayed to synchronize against the RCU
+ * protected lookup_ioctx() and also needs process context to call
+ * aio_free_ring(), so the double bouncing through kioctx->free_rcu and
+ * ->free_work.
+ */
+static void free_ioctx(struct work_struct *work)
{
- struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
+ struct kioctx *ctx = container_of(work, struct kioctx, free_work);
pr_debug("freeing %p\n", ctx);
@@ -596,6 +604,14 @@ static void free_ioctx(struct swork_event *sev)
kmem_cache_free(kioctx_cachep, ctx);
}
+static void free_ioctx_rcufn(struct rcu_head *head)
+{
+ struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);
+
+ INIT_WORK(&ctx->free_work, free_ioctx);
+ schedule_work(&ctx->free_work);
+}
+
static void free_ioctx_reqs(struct percpu_ref *ref)
{
struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
@@ -604,8 +620,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
complete(&ctx->rq_wait->comp);
- INIT_SWORK(&ctx->free_work, free_ioctx);
- swork_queue(&ctx->free_work);
+ /* Synchronize against RCU protected table->table[] dereferences */
+ call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
}
/*
@@ -615,7 +631,7 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
*/
static void free_ioctx_users_work(struct swork_event *sev)
{
- struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
+ struct kioctx *ctx = container_of(sev, struct kioctx, free_swork);
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
@@ -638,8 +654,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
{
struct kioctx *ctx = container_of(ref, struct kioctx, users);
- INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
- swork_queue(&ctx->free_work);
+ INIT_SWORK(&ctx->free_swork, free_ioctx_users_work);
+ swork_queue(&ctx->free_swork);
}
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
@@ -654,9 +670,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
while (1) {
if (table)
for (i = 0; i < table->nr; i++)
- if (!table->table[i]) {
+ if (!rcu_access_pointer(table->table[i])) {
ctx->id = i;
- table->table[i] = ctx;
+ rcu_assign_pointer(table->table[i], ctx);
spin_unlock(&mm->ioctx_lock);
/* While kioctx setup is in progress,
@@ -831,11 +847,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
}
table = rcu_dereference_raw(mm->ioctx_table);
- WARN_ON(ctx != table->table[ctx->id]);
- table->table[ctx->id] = NULL;
+ WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
+ RCU_INIT_POINTER(table->table[ctx->id], NULL);
spin_unlock(&mm->ioctx_lock);
- /* percpu_ref_kill() will do the necessary call_rcu() */
+ /* free_ioctx_reqs() will do the necessary RCU synchronization */
wake_up_all(&ctx->wait);
/*
@@ -877,7 +893,8 @@ void exit_aio(struct mm_struct *mm)
skipped = 0;
for (i = 0; i < table->nr; ++i) {
- struct kioctx *ctx = table->table[i];
+ struct kioctx *ctx =
+ rcu_dereference_protected(table->table[i], true);
if (!ctx) {
skipped++;
@@ -1066,7 +1083,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
if (!table || id >= table->nr)
goto out;
- ctx = table->table[id];
+ ctx = rcu_dereference(table->table[id]);
if (ctx && ctx->user_id == ctx_id) {
percpu_ref_get(&ctx->users);
ret = ctx;
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index a11f73174877..6182d693cf43 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -746,7 +746,7 @@ static int autofs4_dir_mkdir(struct inode *dir,
autofs4_del_active(dentry);
- inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
+ inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
if (!inode)
return -ENOMEM;
d_add(dentry, inode);
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 8d8370ddb6b2..1ba49ebe67da 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -114,13 +114,17 @@ out:
int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
int ret;
+ umode_t old_mode = inode->i_mode;
if (type == ACL_TYPE_ACCESS && acl) {
ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
if (ret)
return ret;
}
- return __btrfs_set_acl(NULL, inode, acl, type);
+ ret = __btrfs_set_acl(NULL, inode, acl, type);
+ if (ret)
+ inode->i_mode = old_mode;
+ return ret;
}
/*
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8ed05d95584a..03ac3ab4b3b4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2453,7 +2453,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
if (!uptodate) {
ClearPageUptodate(page);
SetPageError(page);
- ret = ret < 0 ? ret : -EIO;
+ ret = err < 0 ? err : -EIO;
mapping_set_error(page->mapping, ret);
}
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3286a6e47ff0..c95ff096cd24 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2817,8 +2817,10 @@ static long btrfs_fallocate(struct file *file, int mode,
}
ret = btrfs_qgroup_reserve_data(inode, cur_offset,
last_byte - cur_offset);
- if (ret < 0)
+ if (ret < 0) {
+ free_extent_map(em);
break;
+ }
} else {
/*
* Do not need to reserve unwritten extent for this
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d196ce4be31c..ffd5831ca15c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -567,8 +567,10 @@ cont:
PAGE_SET_WRITEBACK |
page_error_op |
PAGE_END_WRITEBACK);
- btrfs_free_reserved_data_space_noquota(inode, start,
- end - start + 1);
+ if (ret == 0)
+ btrfs_free_reserved_data_space_noquota(inode,
+ start,
+ end - start + 1);
goto free_pages_out;
}
}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 9a47b5598df7..d040afc966fe 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5156,13 +5156,19 @@ static int is_extent_unchanged(struct send_ctx *sctx,
while (key.offset < ekey->offset + left_len) {
ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
right_type = btrfs_file_extent_type(eb, ei);
- if (right_type != BTRFS_FILE_EXTENT_REG) {
+ if (right_type != BTRFS_FILE_EXTENT_REG &&
+ right_type != BTRFS_FILE_EXTENT_INLINE) {
ret = 0;
goto out;
}
right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
- right_len = btrfs_file_extent_num_bytes(eb, ei);
+ if (right_type == BTRFS_FILE_EXTENT_INLINE) {
+ right_len = btrfs_file_extent_inline_len(eb, slot, ei);
+ right_len = PAGE_ALIGN(right_len);
+ } else {
+ right_len = btrfs_file_extent_num_bytes(eb, ei);
+ }
right_offset = btrfs_file_extent_offset(eb, ei);
right_gen = btrfs_file_extent_generation(eb, ei);
@@ -5176,6 +5182,19 @@ static int is_extent_unchanged(struct send_ctx *sctx,
goto out;
}
+ /*
+ * We just wanted to see if when we have an inline extent, what
+ * follows it is a regular extent (wanted to check the above
+ * condition for inline extents too). This should normally not
+ * happen but it's possible for example when we have an inline
+ * compressed extent representing data with a size matching
+ * the page size (currently the same as sector size).
+ */
+ if (right_type == BTRFS_FILE_EXTENT_INLINE) {
+ ret = 0;
+ goto out;
+ }
+
left_offset_fixed = left_offset;
if (key.offset < ekey->offset) {
/* Fix the right offset for 2a and 7. */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 5539f0b95efa..52401732cddc 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3664,7 +3664,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
src_offset = btrfs_item_ptr_offset(src, start_slot + i);
- if ((i == (nr - 1)))
+ if (i == nr - 1)
last_key = ins_keys[i];
if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 06a77e47957d..4730ba2cc049 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -583,6 +583,7 @@ void btrfs_free_stale_device(struct btrfs_device *cur_dev)
btrfs_sysfs_remove_fsid(fs_devs);
list_del(&fs_devs->list);
free_fs_devices(fs_devs);
+ break;
} else {
fs_devs->num_devices--;
list_del(&dev->dev_list);
@@ -3764,6 +3765,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
struct btrfs_ioctl_balance_args *bargs)
{
struct btrfs_fs_info *fs_info = bctl->fs_info;
+ u64 meta_target, data_target;
u64 allowed;
int mixed = 0;
int ret;
@@ -3860,11 +3862,16 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
}
} while (read_seqretry(&fs_info->profiles_lock, seq));
- if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
- btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
+ /* if we're not converting, the target field is uninitialized */
+ meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+ bctl->meta.target : fs_info->avail_metadata_alloc_bits;
+ data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+ bctl->data.target : fs_info->avail_data_alloc_bits;
+ if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
+ btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
btrfs_warn(fs_info,
"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
- bctl->meta.target, bctl->data.target);
+ meta_target, data_target);
}
if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
@@ -4748,10 +4755,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (devs_max && ndevs > devs_max)
ndevs = devs_max;
/*
- * the primary goal is to maximize the number of stripes, so use as many
- * devices as possible, even if the stripes are not maximum sized.
+ * The primary goal is to maximize the number of stripes, so use as
+ * many devices as possible, even if the stripes are not maximum sized.
+ *
+ * The DUP profile stores more than one stripe per device, the
+ * max_avail is the total size so we have to adjust.
*/
- stripe_size = devices_info[ndevs-1].max_avail;
+ stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
num_stripes = ndevs * dev_stripes;
/*
@@ -4791,8 +4801,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
stripe_size = devices_info[ndevs-1].max_avail;
}
- stripe_size = div_u64(stripe_size, dev_stripes);
-
/* align to BTRFS_STRIPE_LEN */
stripe_size = div_u64(stripe_size, raid_stripe_len);
stripe_size *= raid_stripe_len;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ca3f630db90f..e7ddb23d9bb7 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -598,7 +598,8 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
struct ceph_aio_request {
struct kiocb *iocb;
size_t total_len;
- int write;
+ bool write;
+ bool should_dirty;
int error;
struct list_head osd_reqs;
unsigned num_reqs;
@@ -708,7 +709,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
}
}
- ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write);
+ ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
ceph_osdc_put_request(req);
if (rc < 0)
@@ -890,6 +891,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
size_t count = iov_iter_count(iter);
loff_t pos = iocb->ki_pos;
bool write = iov_iter_rw(iter) == WRITE;
+ bool should_dirty = !write && iter_is_iovec(iter);
if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
@@ -954,6 +956,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (aio_req) {
aio_req->iocb = iocb;
aio_req->write = write;
+ aio_req->should_dirty = should_dirty;
INIT_LIST_HEAD(&aio_req->osd_reqs);
if (write) {
aio_req->mtime = mtime;
@@ -1012,7 +1015,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
len = ret;
}
- ceph_put_page_vector(pages, num_pages, !write);
+ ceph_put_page_vector(pages, num_pages, should_dirty);
ceph_osdc_put_request(req);
if (ret < 0)
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 7b496a4e650e..4ed4736b5bc6 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1412,6 +1412,7 @@ struct dfs_info3_param {
#define CIFS_FATTR_NEED_REVAL 0x4
#define CIFS_FATTR_INO_COLLISION 0x8
#define CIFS_FATTR_UNKNOWN_NLINK 0x10
+#define CIFS_FATTR_FAKE_ROOT_INO 0x20
struct cifs_fattr {
u32 cf_flags;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d9cbda269462..331ddd07e505 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -673,6 +673,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
goto mknod_out;
}
+ if (!S_ISCHR(mode) && !S_ISBLK(mode))
+ goto mknod_out;
+
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
goto mknod_out;
@@ -681,10 +684,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
- kfree(full_path);
rc = -ENOMEM;
- free_xid(xid);
- return rc;
+ goto mknod_out;
}
if (backup_cred(cifs_sb))
@@ -731,7 +732,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
pdev->minor = cpu_to_le64(MINOR(device_number));
rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
&bytes_written, iov, 1);
- } /* else if (S_ISFIFO) */
+ }
tcon->ses->server->ops->close(xid, tcon, &fid);
d_drop(direntry);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 02e403af9518..49eeed25f200 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -589,7 +589,7 @@ cifs_relock_file(struct cifsFileInfo *cfile)
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
- down_read(&cinode->lock_sem);
+ down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
if (cinode->can_cache_brlcks) {
/* can cache locks - no need to relock */
up_read(&cinode->lock_sem);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 7ab5be7944aa..24c19eb94fa3 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -701,6 +701,18 @@ cgfi_exit:
return rc;
}
+/* Simple function to return a 64 bit hash of string. Rarely called */
+static __u64 simple_hashstr(const char *str)
+{
+ const __u64 hash_mult = 1125899906842597L; /* a big enough prime */
+ __u64 hash = 0;
+
+ while (*str)
+ hash = (hash + (__u64) *str++) * hash_mult;
+
+ return hash;
+}
+
int
cifs_get_inode_info(struct inode **inode, const char *full_path,
FILE_ALL_INFO *data, struct super_block *sb, int xid,
@@ -810,6 +822,14 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
tmprc);
fattr.cf_uniqueid = iunique(sb, ROOT_I);
cifs_autodisable_serverino(cifs_sb);
+ } else if ((fattr.cf_uniqueid == 0) &&
+ strlen(full_path) == 0) {
+ /* some servers ret bad root ino ie 0 */
+ cifs_dbg(FYI, "Invalid (0) inodenum\n");
+ fattr.cf_flags |=
+ CIFS_FATTR_FAKE_ROOT_INO;
+ fattr.cf_uniqueid =
+ simple_hashstr(tcon->treeName);
}
}
} else
@@ -826,6 +846,16 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
&fattr.cf_uniqueid, data);
if (tmprc)
fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
+ else if ((fattr.cf_uniqueid == 0) &&
+ strlen(full_path) == 0) {
+ /*
+ * Reuse existing root inode num since
+ * inum zero for root causes ls of . and .. to
+ * not be returned
+ */
+ cifs_dbg(FYI, "Srv ret 0 inode num for root\n");
+ fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
+ }
} else
fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
}
@@ -887,6 +917,9 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
}
cgii_exit:
+ if ((*inode) && ((*inode)->i_ino == 0))
+ cifs_dbg(FYI, "inode number of zero returned\n");
+
kfree(buf);
cifs_put_tlink(tlink);
return rc;
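For reference, the fake-root-inode fallback above reduces to a multiplicative hash of the share's tree name; a stand-alone sketch of the same arithmetic (the main() and sample path are hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Multiplicative hash of a NUL-terminated string, as used to synthesize a
 * non-zero root inode number when the server reports inode 0. */
static uint64_t simple_hashstr(const char *str)
{
        const uint64_t hash_mult = 1125899906842597ULL;  /* a large prime */
        uint64_t hash = 0;

        while (*str)
                hash = (hash + (uint64_t)*str++) * hash_mult;

        return hash;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)simple_hashstr("\\\\server\\share"));
        return 0;
}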
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index abae6dd2c6b9..cc88f4f0325e 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -980,10 +980,10 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
cifs_dbg(VFS, "illegal hours %d\n", st->Hours);
days = sd->Day;
month = sd->Month;
- if ((days > 31) || (month > 12)) {
+ if (days < 1 || days > 31 || month < 1 || month > 12) {
cifs_dbg(VFS, "illegal date, month %d day: %d\n", month, days);
- if (month > 12)
- month = 12;
+ days = clamp(days, 1, 31);
+ month = clamp(month, 1, 12);
}
month -= 1;
days += total_days_of_prev_months[month];
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 538d9b55699a..c3db2a882aee 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -344,13 +344,12 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
/* BB is NTLMV2 session security format easier to use here? */
flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
- NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
- if (ses->server->sign) {
+ NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
+ NTLMSSP_NEGOTIATE_SEAL;
+ if (ses->server->sign)
flags |= NTLMSSP_NEGOTIATE_SIGN;
- if (!ses->server->session_estab ||
- ses->ntlmssp->sesskey_per_smbsess)
- flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
- }
+ if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
+ flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -407,13 +406,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
flags = NTLMSSP_NEGOTIATE_56 |
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
- NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
- if (ses->server->sign) {
+ NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
+ NTLMSSP_NEGOTIATE_SEAL;
+ if (ses->server->sign)
flags |= NTLMSSP_NEGOTIATE_SIGN;
- if (!ses->server->session_estab ||
- ses->ntlmssp->sesskey_per_smbsess)
- flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
- }
+ if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
+ flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 94c4c1901222..44b7ccbe4b08 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -707,15 +707,13 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
struct cifs_ses *ses = sess_data->ses;
mutex_lock(&ses->server->srv_mutex);
- if (ses->server->sign && ses->server->ops->generate_signingkey) {
+ if (ses->server->ops->generate_signingkey) {
rc = ses->server->ops->generate_signingkey(ses);
- kfree(ses->auth_key.response);
- ses->auth_key.response = NULL;
if (rc) {
cifs_dbg(FYI,
"SMB3 session key generation failed\n");
mutex_unlock(&ses->server->srv_mutex);
- goto keygen_exit;
+ return rc;
}
}
if (!ses->server->session_estab) {
@@ -729,12 +727,6 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
ses->status = CifsGood;
ses->need_reconnect = false;
spin_unlock(&GlobalMid_Lock);
-
-keygen_exit:
- if (!ses->server->sign) {
- kfree(ses->auth_key.response);
- ses->auth_key.response = NULL;
- }
return rc;
}
@@ -1159,15 +1151,19 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
goto tcon_exit;
}
- if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
+ switch (rsp->ShareType) {
+ case SMB2_SHARE_TYPE_DISK:
cifs_dbg(FYI, "connection to disk share\n");
- else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
+ break;
+ case SMB2_SHARE_TYPE_PIPE:
tcon->ipc = true;
cifs_dbg(FYI, "connection to pipe share\n");
- } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
- tcon->print = true;
+ break;
+ case SMB2_SHARE_TYPE_PRINT:
+ tcon->ipc = true;
cifs_dbg(FYI, "connection to printer\n");
- } else {
+ break;
+ default:
cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
rc = -EOPNOTSUPP;
goto tcon_error_exit;
@@ -1712,6 +1708,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
} else
iov[0].iov_len = get_rfc1002_length(req) + 4;
+ /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
+ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
+ req->hdr.Flags |= SMB2_FLAGS_SIGNED;
rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index f2d7402abe02..93c8e4a4bbd3 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -833,7 +833,7 @@ static int compat_ioctl_preallocate(struct file *file,
*/
#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
-#define COMPATIBLE_IOCTL(cmd) XFORM(cmd),
+#define COMPATIBLE_IOCTL(cmd) XFORM((u32)cmd),
/* ioctl should not be warned about even if it's not implemented.
Valid reasons to use this:
- It is implemented with ->compat_ioctl on some device, but programs
diff --git a/fs/dax.c b/fs/dax.c
index 800748f10b3d..71f87d74afe1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -785,6 +785,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
if (ret < 0)
return ret;
}
+ start_index = indices[pvec.nr - 1] + 1;
}
return 0;
}
diff --git a/fs/dcache.c b/fs/dcache.c
index f0719b2f1be5..ff77ec32b67b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -462,9 +462,11 @@ static void dentry_lru_add(struct dentry *dentry)
* d_drop() is used mainly for stuff that wants to invalidate a dentry for some
* reason (NFS timeouts or autofs deletes).
*
- * __d_drop requires dentry->d_lock.
+ * __d_drop requires dentry->d_lock
+ * ___d_drop doesn't mark dentry as "unhashed"
+ * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
*/
-void __d_drop(struct dentry *dentry)
+static void ___d_drop(struct dentry *dentry)
{
if (!d_unhashed(dentry)) {
struct hlist_bl_head *b;
@@ -480,12 +482,17 @@ void __d_drop(struct dentry *dentry)
hlist_bl_lock(b);
__hlist_bl_del(&dentry->d_hash);
- dentry->d_hash.pprev = NULL;
hlist_bl_unlock(b);
/* After this call, in-progress rcu-walk path lookup will fail. */
write_seqcount_invalidate(&dentry->d_seq);
}
}
+
+void __d_drop(struct dentry *dentry)
+{
+ ___d_drop(dentry);
+ dentry->d_hash.pprev = NULL;
+}
EXPORT_SYMBOL(__d_drop);
void d_drop(struct dentry *dentry)
@@ -638,11 +645,16 @@ again:
spin_unlock(&parent->d_lock);
goto again;
}
- rcu_read_unlock();
- if (parent != dentry)
+ if (parent != dentry) {
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- else
+ if (unlikely(dentry->d_lockref.count < 0)) {
+ spin_unlock(&parent->d_lock);
+ parent = NULL;
+ }
+ } else {
parent = NULL;
+ }
+ rcu_read_unlock();
return parent;
}
@@ -2385,7 +2397,7 @@ EXPORT_SYMBOL(d_delete);
static void __d_rehash(struct dentry *entry)
{
struct hlist_bl_head *b = d_hash(entry->d_name.hash);
- BUG_ON(!d_unhashed(entry));
+
hlist_bl_lock(b);
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
@@ -2827,9 +2839,9 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
/* unhash both */
- /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
- __d_drop(dentry);
- __d_drop(target);
+ /* ___d_drop does write_seqcount_barrier, but they're OK to nest. */
+ ___d_drop(dentry);
+ ___d_drop(target);
/* Switch the names.. */
if (exchange)
@@ -2841,6 +2853,8 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
__d_rehash(dentry);
if (exchange)
__d_rehash(target);
+ else
+ target->d_hash.pprev = NULL;
/* ... and switch them in the tree */
if (IS_ROOT(dentry)) {
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index e04ec868e37e..6776f4aa3d12 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -242,8 +242,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
*/
ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
sb->s_blocksize * 8, bh->b_data);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
- ext4_group_desc_csum_set(sb, block_group, gdp);
return 0;
}
@@ -322,6 +320,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_grpblk_t offset;
ext4_grpblk_t next_zero_bit;
+ ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
ext4_fsblk_t blk;
ext4_fsblk_t group_first_block;
@@ -339,20 +338,25 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
/* check whether block bitmap block number is set */
blk = ext4_block_bitmap(sb, desc);
offset = blk - group_first_block;
- if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;
/* check whether the inode bitmap block number is set */
blk = ext4_inode_bitmap(sb, desc);
offset = blk - group_first_block;
- if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;
/* check whether the inode table block number is set */
blk = ext4_inode_table(sb, desc);
offset = blk - group_first_block;
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
+ return blk;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
EXT4_B2C(sbi, offset));
@@ -418,6 +422,7 @@ struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
struct ext4_group_desc *desc;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh;
ext4_fsblk_t bitmap_blk;
int err;
@@ -426,6 +431,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
if (!desc)
return ERR_PTR(-EFSCORRUPTED);
bitmap_blk = ext4_block_bitmap(sb, desc);
+ if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+ ext4_error(sb, "Invalid block bitmap block %llu in "
+ "block_group %u", bitmap_blk, block_group);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, "Cannot get buffer for block bitmap - "
@@ -447,6 +458,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
+ set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
if (err) {
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 1a0c57100f28..63c702b4b24c 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5356,8 +5356,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
stop = le32_to_cpu(extent->ee_block);
/*
- * In case of left shift, Don't start shifting extents until we make
- * sure the hole is big enough to accommodate the shift.
+ * For left shifts, make sure the hole on the left is big enough to
+ * accommodate the shift. For right shifts, make sure the last extent
+ * won't be shifted beyond EXT_MAX_BLOCKS.
*/
if (SHIFT == SHIFT_LEFT) {
path = ext4_find_extent(inode, start - 1, &path,
@@ -5377,9 +5378,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
if ((start == ex_start && shift > ex_start) ||
(shift > start - ex_end)) {
- ext4_ext_drop_refs(path);
- kfree(path);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ if (shift > EXT_MAX_BLOCKS -
+ (stop + ext4_ext_get_actual_len(extent))) {
+ ret = -EINVAL;
+ goto out;
}
}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 510e66422f04..08fca4add1e2 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -429,7 +429,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
int i, num;
unsigned long nr_pages;
- num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
(pgoff_t)num);
if (nr_pages == 0)
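The ext4_find_unwritten_pgoff() hunk fixes the batch size passed to pagevec_lookup(): for the inclusive range [index, end] it must be at least 1 (the old formula yielded 0 when end == index) and at most PAGEVEC_SIZE. A sketch of the corrected arithmetic, assuming the v4.9 value PAGEVEC_SIZE == 14:

#include <stdio.h>

#define PAGEVEC_SIZE 14UL       /* value in the v4.9 pagevec.h */

/* Pages to request per lookup over the inclusive range [index, end]:
 * clamped to [1, PAGEVEC_SIZE] so a single-page range still asks for one
 * page instead of zero. */
static unsigned long batch_pages(unsigned long index, unsigned long end)
{
        unsigned long span = end - index;

        return (span < PAGEVEC_SIZE - 1 ? span : PAGEVEC_SIZE - 1) + 1;
}

int main(void)
{
        printf("%lu %lu\n", batch_pages(5, 5), batch_pages(0, 1000));   /* 1 and 14 */
        return 0;
}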
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2d94e8524839..dcf63daefee0 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -63,44 +63,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
-/* Initializes an uninitialized inode bitmap */
-static int ext4_init_inode_bitmap(struct super_block *sb,
- struct buffer_head *bh,
- ext4_group_t block_group,
- struct ext4_group_desc *gdp)
-{
- struct ext4_group_info *grp;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- J_ASSERT_BH(bh, buffer_locked(bh));
-
- /* If checksum is bad mark all blocks and inodes use to prevent
- * allocation, essentially implementing a per-group read-only flag. */
- if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
- grp = ext4_get_group_info(sb, block_group);
- if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
- percpu_counter_sub(&sbi->s_freeclusters_counter,
- grp->bb_free);
- set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
- if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
- int count;
- count = ext4_free_inodes_count(sb, gdp);
- percpu_counter_sub(&sbi->s_freeinodes_counter,
- count);
- }
- set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
- return -EFSBADCRC;
- }
-
- memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
- ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
- bh->b_data);
- ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
- ext4_group_desc_csum_set(sb, block_group, gdp);
-
- return 0;
-}
-
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
if (uptodate) {
@@ -157,6 +119,7 @@ static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
struct ext4_group_desc *desc;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh = NULL;
ext4_fsblk_t bitmap_blk;
int err;
@@ -166,6 +129,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
return ERR_PTR(-EFSCORRUPTED);
bitmap_blk = ext4_inode_bitmap(sb, desc);
+ if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+ ext4_error(sb, "Invalid inode bitmap blk %llu in "
+ "block_group %u", bitmap_blk, block_group);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, "Cannot read inode bitmap - "
@@ -184,17 +153,14 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
ext4_lock_group(sb, block_group);
if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
- err = ext4_init_inode_bitmap(sb, bh, block_group, desc);
+ memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+ ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
+ sb->s_blocksize * 8, bh->b_data);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
- if (err) {
- ext4_error(sb, "Failed to init inode bitmap for group "
- "%u: %d", block_group, err);
- goto out;
- }
return bh;
}
ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5cccec68a0a5..340428274532 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3396,7 +3396,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- struct ext4_inode_info *ei = EXT4_I(inode);
ssize_t ret;
loff_t offset = iocb->ki_pos;
size_t count = iov_iter_count(iter);
@@ -3420,7 +3419,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
goto out;
}
orphan = 1;
- ei->i_disksize = inode->i_size;
+ ext4_update_i_disksize(inode, inode->i_size);
ext4_journal_stop(handle);
}
@@ -3548,7 +3547,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
if (ret > 0) {
loff_t end = offset + ret;
if (end > inode->i_size) {
- ei->i_disksize = end;
+ ext4_update_i_disksize(inode, end);
i_size_write(inode, end);
/*
* We're going to return a positive `ret'
@@ -4494,6 +4493,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
goto bad_inode;
raw_inode = ext4_raw_inode(&iloc);
+ if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
+ EXT4_ERROR_INODE(inode, "root inode unallocated");
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
+
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 64056c6eb857..14bd37041e1a 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3877,7 +3877,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
err = ext4_mb_load_buddy(sb, group, &e4b);
if (err) {
- ext4_error(sb, "Error loading buddy information for %u", group);
+ ext4_warning(sb, "Error %d loading buddy information for %u",
+ err, group);
put_bh(bitmap_bh);
return 0;
}
@@ -4034,10 +4035,11 @@ repeat:
BUG_ON(pa->pa_type != MB_INODE_PA);
group = ext4_get_group_number(sb, pa->pa_pstart);
- err = ext4_mb_load_buddy(sb, group, &e4b);
+ err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+ GFP_NOFS|__GFP_NOFAIL);
if (err) {
- ext4_error(sb, "Error loading buddy information for %u",
- group);
+ ext4_error(sb, "Error %d loading buddy information for %u",
+ err, group);
continue;
}
@@ -4293,11 +4295,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
spin_unlock(&lg->lg_prealloc_lock);
list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
+ int err;
group = ext4_get_group_number(sb, pa->pa_pstart);
- if (ext4_mb_load_buddy(sb, group, &e4b)) {
- ext4_error(sb, "Error loading buddy information for %u",
- group);
+ err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+ GFP_NOFS|__GFP_NOFAIL);
+ if (err) {
+ ext4_error(sb, "Error %d loading buddy information for %u",
+ err, group);
continue;
}
ext4_lock_group(sb, group);
@@ -5117,8 +5122,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ret = ext4_mb_load_buddy(sb, group, &e4b);
if (ret) {
- ext4_error(sb, "Error in loading buddy "
- "information for %u", group);
+ ext4_warning(sb, "Error %d loading buddy information for %u",
+ ret, group);
return ret;
}
bitmap = e4b.bd_bitmap;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1ec4b6e34747..bfb83d76d128 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2260,6 +2260,8 @@ static int ext4_check_descriptors(struct super_block *sb,
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Block bitmap for group %u overlaps "
"superblock", i);
+ if (!(sb->s_flags & MS_RDONLY))
+ return 0;
}
if (block_bitmap < first_block || block_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2272,6 +2274,8 @@ static int ext4_check_descriptors(struct super_block *sb,
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode bitmap for group %u overlaps "
"superblock", i);
+ if (!(sb->s_flags & MS_RDONLY))
+ return 0;
}
if (inode_bitmap < first_block || inode_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2284,6 +2288,8 @@ static int ext4_check_descriptors(struct super_block *sb,
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode table for group %u overlaps "
"superblock", i);
+ if (!(sb->s_flags & MS_RDONLY))
+ return 0;
}
if (inode_table < first_block ||
inode_table + sbi->s_itb_per_group - 1 > last_block) {
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 3eeed8f0aa06..3fadfabcac39 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -837,8 +837,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
if (!IS_LAST_ENTRY(s->first))
ext4_xattr_rehash(header(s->base),
s->here);
- ext4_xattr_cache_insert(ext4_mb_cache,
- bs->bh);
}
ext4_xattr_block_csum_set(inode, bs->bh);
unlock_buffer(bs->bh);
@@ -959,6 +957,7 @@ inserted:
} else if (bs->bh && s->base == bs->bh->b_data) {
/* We were modifying this block in-place. */
ea_bdebug(bs->bh, "keeping this block");
+ ext4_xattr_cache_insert(ext4_mb_cache, bs->bh);
new_bh = bs->bh;
get_bh(new_bh);
} else {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 99432b59c5cb..ae354ac67da1 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -844,7 +844,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
if (!ret) {
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
- bh->b_size = map.m_len << inode->i_blkbits;
+ bh->b_size = (u64)map.m_len << inode->i_blkbits;
}
return ret;
}
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 7b32ce979fe1..63e519658d73 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -177,7 +177,7 @@ static void __drop_largest_extent(struct inode *inode,
}
/* return true, if inode page is changed */
-bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et;
@@ -215,6 +215,16 @@ out:
return false;
}
+bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+{
+ bool ret = __f2fs_init_extent_tree(inode, i_ext);
+
+ if (!F2FS_I(inode)->extent_tree)
+ set_inode_flag(inode, FI_NO_EXTENT);
+
+ return ret;
+}
+
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
struct extent_info *ei)
{
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 34a69e7ed90b..17ab23f64bba 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -538,8 +538,10 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
get_node_info(sbi, nid, dni);
if (sum->version != dni->version) {
- f2fs_put_page(node_page, 1);
- return false;
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: valid data with mismatched node version.",
+ __func__);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
}
*nofs = ofs_of_node(node_page);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 0703a1179847..f3aea1b8702c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits)
*/
if (inode && inode_to_wb_is_valid(inode)) {
struct bdi_writeback *wb;
- bool locked, congested;
+ struct wb_lock_cookie lock_cookie = {};
+ bool congested;
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
congested = wb_congested(wb, cong_bits);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &lock_cookie);
return congested;
}
@@ -1941,7 +1942,7 @@ void wb_workfn(struct work_struct *work)
}
if (!list_empty(&wb->work_list))
- mod_delayed_work(bdi_wq, &wb->dwork, 0);
+ wb_wakeup(wb);
else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
wb_wakeup_delayed(wb);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 7d4b557f1962..d10bb2c30bf8 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -276,11 +276,11 @@ loop:
goto loop;
end_loop:
- write_unlock(&journal->j_state_lock);
del_timer_sync(&journal->j_commit_timer);
journal->j_task = NULL;
wake_up(&journal->j_wait_done_commit);
jbd_debug(1, "Journal thread exiting.\n");
+ write_unlock(&journal->j_state_lock);
return 0;
}
@@ -691,8 +691,21 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
{
int err = 0;
- jbd2_might_wait_for_commit(journal);
read_lock(&journal->j_state_lock);
+#ifdef CONFIG_PROVE_LOCKING
+ /*
+ * Some callers make sure transaction is already committing and in that
+ * case we cannot block on open handles anymore. So don't warn in that
+ * case.
+ */
+ if (tid_gt(tid, journal->j_commit_sequence) &&
+ (!journal->j_committing_transaction ||
+ journal->j_committing_transaction->t_tid != tid)) {
+ read_unlock(&journal->j_state_lock);
+ jbd2_might_wait_for_commit(journal);
+ read_lock(&journal->j_state_lock);
+ }
+#endif
#ifdef CONFIG_JBD2_DEBUG
if (!tid_geq(journal->j_commit_request, tid)) {
printk(KERN_ERR
@@ -938,7 +951,7 @@ out:
}
/*
- * This is a variaon of __jbd2_update_log_tail which checks for validity of
+ * This is a variation of __jbd2_update_log_tail which checks for validity of
* provided log tail and locks j_checkpoint_mutex. So it is safe against races
* with other threads updating log tail.
*/
@@ -1381,6 +1394,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
journal_superblock_t *sb = journal->j_superblock;
int ret;
+ if (is_journal_aborted(journal))
+ return -EIO;
+
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
tail_block, tail_tid);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 4e5c6103b76c..9e9e0936138b 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -528,6 +528,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
*/
ret = start_this_handle(journal, handle, GFP_NOFS);
if (ret < 0) {
+ handle->h_journal = journal;
jbd2_journal_free_reserved(handle);
return ret;
}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 5ef21f4c4c77..59c019a148f6 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -342,7 +342,7 @@ static void jffs2_put_super (struct super_block *sb)
static void jffs2_kill_sb(struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
- if (!(sb->s_flags & MS_RDONLY))
+ if (c && !(sb->s_flags & MS_RDONLY))
jffs2_stop_garbage_collect_thread(c);
kill_mtd_super(sb);
kfree(c);
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 9d373247222c..c19123dcd1a4 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -132,6 +132,8 @@ lockd(void *vrqstp)
{
int err = 0;
struct svc_rqst *rqstp = vrqstp;
+ struct net *net = &init_net;
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
/* try_to_freeze() is called from svc_recv() */
set_freezable();
@@ -176,6 +178,8 @@ lockd(void *vrqstp)
if (nlmsvc_ops)
nlmsvc_invalidate_all();
nlm_shutdown_hosts();
+ cancel_delayed_work_sync(&ln->grace_period_end);
+ locks_end_grace(&ln->lockd_manager);
return 0;
}
diff --git a/fs/namei.c b/fs/namei.c
index 968c574d4aba..7996c8c2123b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -221,9 +221,10 @@ getname_kernel(const char * filename)
if (len <= EMBEDDED_NAME_MAX) {
result->name = (char *)result->iname;
} else if (len <= PATH_MAX) {
+ const size_t size = offsetof(struct filename, iname[1]);
struct filename *tmp;
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ tmp = kmalloc(size, GFP_KERNEL);
if (unlikely(!tmp)) {
__putname(result);
return ERR_PTR(-ENOMEM);
@@ -578,9 +579,10 @@ static int __nd_alloc_stack(struct nameidata *nd)
static bool path_connected(const struct path *path)
{
struct vfsmount *mnt = path->mnt;
+ struct super_block *sb = mnt->mnt_sb;
- /* Only bind mounts can have disconnected paths */
- if (mnt->mnt_root == mnt->mnt_sb->s_root)
+ /* Bind mounts and multi-root filesystems can have disconnected paths */
+ if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
return true;
return is_subdir(path->dentry, mnt->mnt_root);
@@ -1121,9 +1123,6 @@ static int follow_automount(struct path *path, struct nameidata *nd,
path->dentry->d_inode)
return -EISDIR;
- if (path->dentry->d_sb->s_user_ns != &init_user_ns)
- return -EACCES;
-
nd->total_link_count++;
if (nd->total_link_count >= 40)
return -ELOOP;
diff --git a/fs/namespace.c b/fs/namespace.c
index da188c6966a3..ffa9923ff4f7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1037,7 +1037,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
goto out_free;
}
- mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
+ mnt->mnt.mnt_flags = old->mnt.mnt_flags;
+ mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
/* Don't allow unprivileged users to change mount flags */
if (flag & CL_UNPRIVILEGED) {
mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index 88dbbc9fcf4d..f571570a2e72 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -980,6 +980,10 @@ ncp_read_kernel(struct ncp_server *server, const char *file_id,
goto out;
}
*bytes_read = ncp_reply_be16(server, 0);
+ if (*bytes_read > to_read) {
+ result = -EINVAL;
+ goto out;
+ }
source = ncp_reply_data(server, 2 + (offset & 1));
memcpy(target, source, *bytes_read);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 1ac1593aded3..1ab91124a93e 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -86,10 +86,10 @@ struct nfs_direct_req {
struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
int mirror_count;
+ loff_t io_start; /* Start offset for I/O */
ssize_t count, /* bytes actually processed */
max_count, /* max expected count */
bytes_left, /* bytes left to be sent */
- io_start, /* start of IO */
error; /* any reported error */
struct completion completion; /* wait for i/o completion */
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 13abd608af0f..4539008502ce 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -475,6 +475,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
goto out_err_free;
/* fh */
+ rc = -EIO;
p = xdr_inline_decode(&stream, 4);
if (!p)
goto out_err_free;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 5dd6fd555c72..c5c984519e32 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3300,6 +3300,7 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
.rpc_resp = &res,
};
int status;
+ int i;
bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
FATTR4_WORD0_FH_EXPIRE_TYPE |
@@ -3365,8 +3366,13 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
server->cache_consistency_bitmask[2] = 0;
+
+ /* Avoid a regression due to buggy server */
+ for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
+ res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
sizeof(server->exclcreat_bitmask));
+
server->acl_bitmask = res.acl_bitmask;
server->fh_expire_type = res.fh_expire_type;
}
@@ -8173,6 +8179,12 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
return -EAGAIN;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ nfs4_schedule_session_recovery(clp->cl_session,
+ task->tk_status);
+ break;
default:
nfs4_schedule_lease_recovery(clp);
}
@@ -8251,7 +8263,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
if (status == 0)
status = task->tk_status;
rpc_put_task(task);
- return 0;
out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 4be6999299dc..ae00fc5edc5c 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1647,13 +1647,14 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}
-static void nfs4_reclaim_complete(struct nfs_client *clp,
+static int nfs4_reclaim_complete(struct nfs_client *clp,
const struct nfs4_state_recovery_ops *ops,
struct rpc_cred *cred)
{
/* Notify the server we're done reclaiming our state */
if (ops->reclaim_complete)
- (void)ops->reclaim_complete(clp, cred);
+ return ops->reclaim_complete(clp, cred);
+ return 0;
}
static void nfs4_clear_reclaim_server(struct nfs_server *server)
@@ -1700,13 +1701,16 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
const struct nfs4_state_recovery_ops *ops;
struct rpc_cred *cred;
+ int err;
if (!nfs4_state_clear_reclaim_reboot(clp))
return;
ops = clp->cl_mvops->reboot_recovery_ops;
cred = nfs4_get_clid_cred(clp);
- nfs4_reclaim_complete(clp, ops, cred);
+ err = nfs4_reclaim_complete(clp, ops, cred);
put_rpccred(cred);
+ if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
+ set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
}
static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 3d17fc82b9fe..892c88542ebd 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1262,8 +1262,10 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
mirror = &desc->pg_mirrors[midx];
if (!list_empty(&mirror->pg_list)) {
prev = nfs_list_entry(mirror->pg_list.prev);
- if (index != prev->wb_index + 1)
- nfs_pageio_complete_mirror(desc, midx);
+ if (index != prev->wb_index + 1) {
+ nfs_pageio_complete(desc);
+ break;
+ }
}
}
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index b8e44746f761..0e008db16b16 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1953,8 +1953,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
spin_lock(&inode->i_lock);
pnfs_set_plh_return_info(lo, range.iomode, 0);
- /* Block LAYOUTGET */
- set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
/*
* mark all matching lsegs so that we are sure to have no live
* segments at hand when sending layoutreturn. See pnfs_put_lseg()
@@ -2308,10 +2306,20 @@ pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
enum pnfs_try_status trypnfs;
trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
- if (trypnfs == PNFS_TRY_AGAIN)
- pnfs_read_resend_pnfs(hdr);
- if (trypnfs == PNFS_NOT_ATTEMPTED || hdr->task.tk_status)
+ switch (trypnfs) {
+ case PNFS_NOT_ATTEMPTED:
pnfs_read_through_mds(desc, hdr);
+ case PNFS_ATTEMPTED:
+ break;
+ case PNFS_TRY_AGAIN:
+ /* cleanup hdr and prepare to redo pnfs */
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
+ list_splice_init(&hdr->pages, &mirror->pg_list);
+ mirror->pg_recoalesce = 1;
+ }
+ hdr->mds_ops->rpc_release(hdr);
+ }
}
static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 51bf1f9ab287..2fdb8f5a7b69 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2613,6 +2613,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
/* initial superblock/root creation */
mount_info->fill_super(s, mount_info);
nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
+ if (!(server->flags & NFS_MOUNT_UNSHARED))
+ s->s_iflags |= SB_I_MULTIROOT;
}
mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 9a3b3820306d..a8b786a648cd 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1847,40 +1847,43 @@ int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
return status;
}
-int nfs_commit_inode(struct inode *inode, int how)
+static int __nfs_commit_inode(struct inode *inode, int how,
+ struct writeback_control *wbc)
{
LIST_HEAD(head);
struct nfs_commit_info cinfo;
int may_wait = how & FLUSH_SYNC;
- int error = 0;
- int res;
+ int ret, nscan;
nfs_init_cinfo_from_inode(&cinfo, inode);
nfs_commit_begin(cinfo.mds);
- res = nfs_scan_commit(inode, &head, &cinfo);
- if (res)
- error = nfs_generic_commit_list(inode, &head, how, &cinfo);
+ for (;;) {
+ ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
+ if (ret <= 0)
+ break;
+ ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
+ if (ret < 0)
+ break;
+ ret = 0;
+ if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
+ if (nscan < wbc->nr_to_write)
+ wbc->nr_to_write -= nscan;
+ else
+ wbc->nr_to_write = 0;
+ }
+ if (nscan < INT_MAX)
+ break;
+ cond_resched();
+ }
nfs_commit_end(cinfo.mds);
- if (res == 0)
- return res;
- if (error < 0)
- goto out_error;
- if (!may_wait)
- goto out_mark_dirty;
- error = wait_on_commit(cinfo.mds);
- if (error < 0)
- return error;
- return res;
-out_error:
- res = error;
- /* Note: If we exit without ensuring that the commit is complete,
- * we must mark the inode as dirty. Otherwise, future calls to
- * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
- * that the data is on the disk.
- */
-out_mark_dirty:
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- return res;
+ if (ret || !may_wait)
+ return ret;
+ return wait_on_commit(cinfo.mds);
+}
+
+int nfs_commit_inode(struct inode *inode, int how)
+{
+ return __nfs_commit_inode(inode, how, NULL);
}
EXPORT_SYMBOL_GPL(nfs_commit_inode);
@@ -1890,11 +1893,11 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
int flags = FLUSH_SYNC;
int ret = 0;
- /* no commits means nothing needs to be done */
- if (!nfsi->commit_info.ncommit)
- return ret;
-
if (wbc->sync_mode == WB_SYNC_NONE) {
+ /* no commits means nothing needs to be done */
+ if (!nfsi->commit_info.ncommit)
+ goto check_requests_outstanding;
+
/* Don't commit yet if this is a non-blocking flush and there
* are a lot of outstanding writes for this mapping.
*/
@@ -1905,16 +1908,16 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
flags = 0;
}
- ret = nfs_commit_inode(inode, flags);
- if (ret >= 0) {
- if (wbc->sync_mode == WB_SYNC_NONE) {
- if (ret < wbc->nr_to_write)
- wbc->nr_to_write -= ret;
- else
- wbc->nr_to_write = 0;
- }
- return 0;
- }
+ ret = __nfs_commit_inode(inode, flags, wbc);
+ if (!ret) {
+ if (flags & FLUSH_SYNC)
+ return 0;
+ } else if (nfsi->commit_info.ncommit)
+ goto out_mark_dirty;
+
+check_requests_outstanding:
+ if (!atomic_read(&nfsi->commit_info.rpcs_out))
+ return ret;
out_mark_dirty:
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
return ret;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 022d95886d66..eef0caf6e67d 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1338,14 +1338,14 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
const struct nfsd4_layout_ops *ops;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
- int accmode;
+ int accmode = NFSD_MAY_READ_IF_EXEC;
switch (lgp->lg_seg.iomode) {
case IOMODE_READ:
- accmode = NFSD_MAY_READ;
+ accmode |= NFSD_MAY_READ;
break;
case IOMODE_RW:
- accmode = NFSD_MAY_READ | NFSD_MAY_WRITE;
+ accmode |= NFSD_MAY_READ | NFSD_MAY_WRITE;
break;
default:
dprintk("%s: invalid iomode %d\n",
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f463c4e0b2ea..12d780718b48 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -263,6 +263,35 @@ free_blocked_lock(struct nfsd4_blocked_lock *nbl)
kfree(nbl);
}
+static void
+remove_blocked_locks(struct nfs4_lockowner *lo)
+{
+ struct nfs4_client *clp = lo->lo_owner.so_client;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+ struct nfsd4_blocked_lock *nbl;
+ LIST_HEAD(reaplist);
+
+ /* Dequeue all blocked locks */
+ spin_lock(&nn->blocked_locks_lock);
+ while (!list_empty(&lo->lo_blocked)) {
+ nbl = list_first_entry(&lo->lo_blocked,
+ struct nfsd4_blocked_lock,
+ nbl_list);
+ list_del_init(&nbl->nbl_list);
+ list_move(&nbl->nbl_lru, &reaplist);
+ }
+ spin_unlock(&nn->blocked_locks_lock);
+
+ /* Now free them */
+ while (!list_empty(&reaplist)) {
+ nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
+ nbl_lru);
+ list_del_init(&nbl->nbl_lru);
+ posix_unblock_lock(&nbl->nbl_lock);
+ free_blocked_lock(nbl);
+ }
+}
+
static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
@@ -1854,6 +1883,7 @@ static __be32 mark_client_expired_locked(struct nfs4_client *clp)
static void
__destroy_client(struct nfs4_client *clp)
{
+ int i;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct list_head reaplist;
@@ -1883,6 +1913,16 @@ __destroy_client(struct nfs4_client *clp)
nfs4_get_stateowner(&oo->oo_owner);
release_openowner(oo);
}
+ for (i = 0; i < OWNER_HASH_SIZE; i++) {
+ struct nfs4_stateowner *so, *tmp;
+
+ list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
+ so_strhash) {
+ /* Should be no openowners at this point */
+ WARN_ON_ONCE(so->so_is_open_owner);
+ remove_blocked_locks(lockowner(so));
+ }
+ }
nfsd4_return_all_client_layouts(clp);
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
@@ -6266,6 +6306,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
}
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
+ remove_blocked_locks(lo);
nfs4_put_stateowner(&lo->lo_owner);
return status;
@@ -7051,6 +7092,8 @@ nfs4_state_destroy_net(struct net *net)
}
}
+ WARN_ON(!list_empty(&nn->blocked_locks_lru));
+
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
while (!list_empty(&nn->unconf_id_hashtbl[i])) {
clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
@@ -7117,7 +7160,6 @@ nfs4_state_shutdown_net(struct net *net)
struct nfs4_delegation *dp = NULL;
struct list_head *pos, *next, reaplist;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- struct nfsd4_blocked_lock *nbl;
cancel_delayed_work_sync(&nn->laundromat_work);
locks_end_grace(&nn->nfsd4_manager);
@@ -7138,24 +7180,6 @@ nfs4_state_shutdown_net(struct net *net)
nfs4_put_stid(&dp->dl_stid);
}
- BUG_ON(!list_empty(&reaplist));
- spin_lock(&nn->blocked_locks_lock);
- while (!list_empty(&nn->blocked_locks_lru)) {
- nbl = list_first_entry(&nn->blocked_locks_lru,
- struct nfsd4_blocked_lock, nbl_lru);
- list_move(&nbl->nbl_lru, &reaplist);
- list_del_init(&nbl->nbl_list);
- }
- spin_unlock(&nn->blocked_locks_lock);
-
- while (!list_empty(&reaplist)) {
- nbl = list_first_entry(&reaplist,
- struct nfsd4_blocked_lock, nbl_lru);
- list_del_init(&nbl->nbl_lru);
- posix_unblock_lock(&nbl->nbl_lock);
- free_blocked_lock(nbl);
- }
-
nfsd4_client_tracking_exit(net);
nfs4_state_destroy_net(net);
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index b829cc9a9b39..8f0b19a3ca81 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -94,6 +94,12 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
err = follow_down(&path);
if (err < 0)
goto out;
+ if (path.mnt == exp->ex_path.mnt && path.dentry == dentry &&
+ nfsd_mountpoint(dentry, exp) == 2) {
+ /* This is only a mountpoint in some other namespace */
+ path_put(&path);
+ goto out;
+ }
exp2 = rqst_exp_get_by_name(rqstp, &path);
if (IS_ERR(exp2)) {
@@ -167,16 +173,26 @@ static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, st
/*
* For nfsd purposes, we treat V4ROOT exports as though there was an
* export at *every* directory.
+ * We return:
+ * '1' if this dentry *must* be an export point,
+ * '2' if it might be, if there is really a mount here, and
+ * '0' if there is no chance of an export point here.
*/
int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
{
- if (d_mountpoint(dentry))
+ if (!d_inode(dentry))
+ return 0;
+ if (exp->ex_flags & NFSEXP_V4ROOT)
return 1;
if (nfsd4_is_junction(dentry))
return 1;
- if (!(exp->ex_flags & NFSEXP_V4ROOT))
- return 0;
- return d_inode(dentry) != NULL;
+ if (d_mountpoint(dentry))
+ /*
+ * Might only be a mountpoint in a different namespace,
+ * but we need to check.
+ */
+ return 2;
+ return 0;
}
__be32
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index e0e5f7c3c99f..8a459b179183 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
u32 event_mask,
void *data, int data_type)
{
- __u32 marks_mask, marks_ignored_mask;
+ __u32 marks_mask = 0, marks_ignored_mask = 0;
struct path *path = data;
pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
!d_can_lookup(path->dentry))
return false;
- if (inode_mark && vfsmnt_mark) {
- marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
- marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
- } else if (inode_mark) {
- /*
- * if the event is for a child and this inode doesn't care about
- * events on the child, don't send it!
- */
- if ((event_mask & FS_EVENT_ON_CHILD) &&
- !(inode_mark->mask & FS_EVENT_ON_CHILD))
- return false;
- marks_mask = inode_mark->mask;
- marks_ignored_mask = inode_mark->ignored_mask;
- } else if (vfsmnt_mark) {
- marks_mask = vfsmnt_mark->mask;
- marks_ignored_mask = vfsmnt_mark->ignored_mask;
- } else {
- BUG();
+ /*
+ * if the event is for a child and this inode doesn't care about
+ * events on the child, don't send it!
+ */
+ if (inode_mark &&
+ (!(event_mask & FS_EVENT_ON_CHILD) ||
+ (inode_mark->mask & FS_EVENT_ON_CHILD))) {
+ marks_mask |= inode_mark->mask;
+ marks_ignored_mask |= inode_mark->ignored_mask;
+ }
+
+ if (vfsmnt_mark) {
+ marks_mask |= vfsmnt_mark->mask;
+ marks_ignored_mask |= vfsmnt_mark->ignored_mask;
}
if (d_is_dir(path->dentry) &&
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 629d8c917fa6..6e35ef6521b4 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -559,6 +559,11 @@ void orangefs_kill_sb(struct super_block *sb)
/* provided sb cleanup */
kill_anon_super(sb);
+ if (!ORANGEFS_SB(sb)) {
+ mutex_lock(&orangefs_request_mutex);
+ mutex_unlock(&orangefs_request_mutex);
+ return;
+ }
/*
* issue the unmount to userspace to tell it to remove the
* dynamic mount info it has for this superblock
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
index f61b00887481..cbca58ba008a 100644
--- a/fs/orangefs/waitqueue.c
+++ b/fs/orangefs/waitqueue.c
@@ -124,7 +124,14 @@ retry_servicing:
gossip_debug(GOSSIP_WAIT_DEBUG,
"%s:client core is NOT in service.\n",
__func__);
- timeout = op_timeout_secs * HZ;
+ /*
+ * Don't wait for the userspace component to return if
+ * the filesystem is being umounted anyway.
+ */
+ if (op->upcall.type == ORANGEFS_VFS_OP_FS_UMOUNT)
+ timeout = 0;
+ else
+ timeout = op_timeout_secs * HZ;
}
spin_unlock(&orangefs_request_list_lock);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 306b6c161840..8546384a5fdf 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -180,6 +180,9 @@ static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
inc_nlink(inode);
}
d_instantiate(dentry, inode);
+ /* Force lookup of new upper hardlink to find its lower */
+ if (hardlink)
+ d_drop(dentry);
}
static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 7fb53d055537..16f6db88c8e5 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -227,6 +227,16 @@ int ovl_xattr_get(struct dentry *dentry, const char *name,
return res;
}
+static bool ovl_can_list(const char *s)
+{
+ /* List all non-trusted xatts */
+ if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+ return true;
+
+ /* Never list trusted.overlay, list other trusted for superuser only */
+ return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+}
+
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
{
struct dentry *realdentry = ovl_dentry_real(dentry);
@@ -250,7 +260,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
return -EIO;
len -= slen;
- if (ovl_is_private_xattr(s)) {
+ if (!ovl_can_list(s)) {
res -= slen;
memmove(s, s + slen, len);
} else {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 0edc16f95596..39d3326ff6d8 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -252,7 +252,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
* Inherently racy -- command line shares address space
* with code and data.
*/
- rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+ rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
if (rv <= 0)
goto out_free_page;
@@ -270,7 +270,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
int nr_read;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -305,7 +305,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
bool final;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -354,7 +354,7 @@ skip_argv:
bool final;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -970,7 +970,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
max_len = min_t(size_t, PAGE_SIZE, count);
this_len = min(max_len, this_len);
- retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
+ retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
if (retval <= 0) {
ret = retval;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index bc2dde2423c2..2a5c4813c47d 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1959,7 +1959,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
* will be requeued because superblock is being shutdown and doesn't
* have MS_ACTIVE set.
*/
- cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
+ reiserfs_cancel_old_flush(sb);
/* wait for all commits to finish */
cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
@@ -2640,7 +2640,7 @@ static int journal_init_dev(struct super_block *super,
if (IS_ERR(journal->j_dev_bd)) {
result = PTR_ERR(journal->j_dev_bd);
journal->j_dev_bd = NULL;
- reiserfs_warning(super,
+ reiserfs_warning(super, "sh-457",
"journal_init_dev: Cannot open '%s': %i",
jdev_name, result);
return result;
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 5dcf3ab83886..6ca00471afbf 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -2948,6 +2948,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s,
struct reiserfs_list_bitmap *, unsigned int);
void reiserfs_schedule_old_flush(struct super_block *s);
+void reiserfs_cancel_old_flush(struct super_block *s);
void add_save_link(struct reiserfs_transaction_handle *th,
struct inode *inode, int truncate);
int remove_save_link(struct inode *inode, int truncate);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index e101d70d2327..dec6c93044fa 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -90,7 +90,9 @@ static void flush_old_commits(struct work_struct *work)
s = sbi->s_journal->j_work_sb;
spin_lock(&sbi->old_work_lock);
- sbi->work_queued = 0;
+ /* Avoid clobbering the cancel state... */
+ if (sbi->work_queued == 1)
+ sbi->work_queued = 0;
spin_unlock(&sbi->old_work_lock);
reiserfs_sync_fs(s, 1);
@@ -117,21 +119,22 @@ void reiserfs_schedule_old_flush(struct super_block *s)
spin_unlock(&sbi->old_work_lock);
}
-static void cancel_old_flush(struct super_block *s)
+void reiserfs_cancel_old_flush(struct super_block *s)
{
struct reiserfs_sb_info *sbi = REISERFS_SB(s);
- cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
spin_lock(&sbi->old_work_lock);
- sbi->work_queued = 0;
+ /* Make sure no new flushes will be queued */
+ sbi->work_queued = 2;
spin_unlock(&sbi->old_work_lock);
+ cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
}
static int reiserfs_freeze(struct super_block *s)
{
struct reiserfs_transaction_handle th;
- cancel_old_flush(s);
+ reiserfs_cancel_old_flush(s);
reiserfs_write_lock(s);
if (!(s->s_flags & MS_RDONLY)) {
@@ -152,7 +155,13 @@ static int reiserfs_freeze(struct super_block *s)
static int reiserfs_unfreeze(struct super_block *s)
{
+ struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+
reiserfs_allow_writes(s);
+ spin_lock(&sbi->old_work_lock);
+ /* Allow old_work to run again */
+ sbi->work_queued = 0;
+ spin_unlock(&sbi->old_work_lock);
return 0;
}
@@ -2194,7 +2203,7 @@ error_unlocked:
if (sbi->commit_wq)
destroy_workqueue(sbi->commit_wq);
- cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+ reiserfs_cancel_old_flush(s);
reiserfs_free_bitmap_cache(s);
if (SB_BUFFER_WITH_SB(s))
diff --git a/fs/super.c b/fs/super.c
index 1058bf3e8724..7e9beab77259 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -519,7 +519,11 @@ retry:
hlist_add_head(&s->s_instances, &type->fs_supers);
spin_unlock(&sb_lock);
get_filesystem(type);
- register_shrinker(&s->s_shrink);
+ err = register_shrinker(&s->s_shrink);
+ if (err) {
+ deactivate_locked_super(s);
+ s = ERR_PTR(err);
+ }
return s;
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 4ec051089186..03dda1cbe485 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1728,8 +1728,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
dbg_save_space_info(c);
- for (i = 0; i < c->jhead_cnt; i++)
- ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ for (i = 0; i < c->jhead_cnt; i++) {
+ err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ if (err)
+ ubifs_ro_mode(c, err);
+ }
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
@@ -1795,8 +1798,11 @@ static void ubifs_put_super(struct super_block *sb)
int err;
/* Synchronize write-buffers */
- for (i = 0; i < c->jhead_cnt; i++)
- ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ for (i = 0; i < c->jhead_cnt; i++) {
+ err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ if (err)
+ ubifs_ro_mode(c, err);
+ }
/*
* We are being cleanly unmounted which means the
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 695389a4fc23..3a3be23689b3 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -28,6 +28,9 @@
#include "udf_sb.h"
+#define SURROGATE_MASK 0xfffff800
+#define SURROGATE_PAIR 0x0000d800
+
static int udf_uni2char_utf8(wchar_t uni,
unsigned char *out,
int boundlen)
@@ -37,6 +40,9 @@ static int udf_uni2char_utf8(wchar_t uni,
if (boundlen <= 0)
return -ENAMETOOLONG;
+ if ((uni & SURROGATE_MASK) == SURROGATE_PAIR)
+ return -EINVAL;
+
if (uni < 0x80) {
out[u_len++] = (unsigned char)uni;
} else if (uni < 0x800) {
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 362c6b4c1186..1410835eebe2 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -846,22 +846,26 @@ xfs_file_fallocate(
if (error)
goto out_unlock;
} else if (mode & FALLOC_FL_INSERT_RANGE) {
- unsigned int blksize_mask = i_blocksize(inode) - 1;
+ unsigned int blksize_mask = i_blocksize(inode) - 1;
+ loff_t isize = i_size_read(inode);
- new_size = i_size_read(inode) + len;
if (offset & blksize_mask || len & blksize_mask) {
error = -EINVAL;
goto out_unlock;
}
- /* check the new inode size does not wrap through zero */
- if (new_size > inode->i_sb->s_maxbytes) {
+ /*
+ * New inode size must not exceed ->s_maxbytes, accounting for
+ * possible signed overflow.
+ */
+ if (inode->i_sb->s_maxbytes - isize < len) {
error = -EFBIG;
goto out_unlock;
}
+ new_size = isize + len;
/* Offset should be less than i_size */
- if (offset >= i_size_read(inode)) {
+ if (offset >= isize) {
error = -EINVAL;
goto out_unlock;
}
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 1fdd3face2d9..e3edaa547216 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -47,7 +47,7 @@
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
-
+STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
* We use the batch lookup interface to iterate over the dquots as it
@@ -694,9 +694,17 @@ xfs_qm_init_quotainfo(
qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
- register_shrinker(&qinf->qi_shrinker);
+
+ error = register_shrinker(&qinf->qi_shrinker);
+ if (error)
+ goto out_free_inos;
+
return 0;
+out_free_inos:
+ mutex_destroy(&qinf->qi_quotaofflock);
+ mutex_destroy(&qinf->qi_tree_lock);
+ xfs_qm_destroy_quotainos(qinf);
out_free_lru:
list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
@@ -705,7 +713,6 @@ out_free_qinf:
return error;
}
-
/*
* Gets called when unmounting a filesystem or when all quotas get
* turned off.
@@ -722,19 +729,8 @@ xfs_qm_destroy_quotainfo(
unregister_shrinker(&qi->qi_shrinker);
list_lru_destroy(&qi->qi_lru);
-
- if (qi->qi_uquotaip) {
- IRELE(qi->qi_uquotaip);
- qi->qi_uquotaip = NULL; /* paranoia */
- }
- if (qi->qi_gquotaip) {
- IRELE(qi->qi_gquotaip);
- qi->qi_gquotaip = NULL;
- }
- if (qi->qi_pquotaip) {
- IRELE(qi->qi_pquotaip);
- qi->qi_pquotaip = NULL;
- }
+ xfs_qm_destroy_quotainos(qi);
+ mutex_destroy(&qi->qi_tree_lock);
mutex_destroy(&qi->qi_quotaofflock);
kmem_free(qi);
mp->m_quotainfo = NULL;
@@ -1620,6 +1616,24 @@ error_rele:
}
STATIC void
+xfs_qm_destroy_quotainos(
+ xfs_quotainfo_t *qi)
+{
+ if (qi->qi_uquotaip) {
+ IRELE(qi->qi_uquotaip);
+ qi->qi_uquotaip = NULL; /* paranoia */
+ }
+ if (qi->qi_gquotaip) {
+ IRELE(qi->qi_gquotaip);
+ qi->qi_gquotaip = NULL;
+ }
+ if (qi->qi_pquotaip) {
+ IRELE(qi->qi_pquotaip);
+ qi->qi_pquotaip = NULL;
+ }
+}
+
+STATIC void
xfs_qm_dqfree_one(
struct xfs_dquot *dqp)
{
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index c93dbadfc71d..db7d15b6a580 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -783,6 +783,15 @@ struct acpi_iort_smmu {
#define ACPI_IORT_SMMU_DVM_SUPPORTED (1)
#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1)
+/* Global interrupt format */
+
+struct acpi_iort_smmu_gsi {
+ u32 nsg_irpt;
+ u32 nsg_irpt_flags;
+ u32 nsg_cfg_irpt;
+ u32 nsg_cfg_irpt_flags;
+};
+
struct acpi_iort_smmu_v3 {
u64 base_address; /* SMMUv3 base address */
u32 flags;
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 8f66aaabadf7..9e3f7618593f 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -48,7 +48,17 @@
* Use compiler specific <stdarg.h> is a good practice for even when
* -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
*/
+#ifndef va_arg
+#ifdef ACPI_USE_BUILTIN_STDARG
+typedef __builtin_va_list va_list;
+#define va_start(v, l) __builtin_va_start(v, l)
+#define va_end(v) __builtin_va_end(v)
+#define va_arg(v, l) __builtin_va_arg(v, l)
+#define va_copy(d, s) __builtin_va_copy(d, s)
+#else
#include <stdarg.h>
+#endif
+#endif
#define ACPI_INLINE __inline__
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
index 17bd3b7b4e5a..bdb6858e2458 100644
--- a/include/acpi/platform/acintel.h
+++ b/include/acpi/platform/acintel.h
@@ -48,7 +48,9 @@
* Use compiler specific <stdarg.h> is a good practice for even when
* -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
*/
+#ifndef va_arg
#include <stdarg.h>
+#endif
/* Configuration specific to Intel 64-bit C compiler */
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index bf2d34c9d804..f0d8b1c51343 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -13,7 +13,7 @@
*/
/**
- * futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
* argument and comparison of the previous
* futex value with another constant.
*
@@ -25,18 +25,11 @@
* <0 - On error
*/
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval, ret;
u32 tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
preempt_disable();
pagefault_disable();
@@ -74,17 +67,9 @@ out_pagefault_enable:
pagefault_enable();
preempt_enable();
- if (ret == 0) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (ret == 0)
+ *oval = oldval;
+
return ret;
}
@@ -126,18 +111,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#else
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -153,17 +129,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
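
For reference, the decode-and-compare logic removed from each architecture above is expected to live in one common caller; a sketch of such a wrapper, reconstructed from the removed lines (the exact upstream helper sits in kernel/futex.c and may differ in detail, e.g. it uses sign_extend32() instead of the old shift trick), could look roughly like:

/* illustrative sketch only -- not part of this patch */
#include <linux/bitops.h>	/* sign_extend32() */
#include <linux/futex.h>	/* FUTEX_OP_* */

static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op	  = (encoded_op & 0x70000000) >> 28;
	unsigned int cmp  = (encoded_op & 0x0f000000) >> 24;
	int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	/* arch code now only performs the atomic op and reports oldval */
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval <  cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval >  cmparg;
	default:	      return -ENOSYS;
	}
}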
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index c4f8fd2fd384..f6ea0f3c03f8 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -764,6 +764,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud);
+int pmd_free_pte_page(pmd_t *pmd);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
@@ -781,6 +783,14 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
+static inline int pud_free_pmd_page(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_free_pte_page(pmd_t *pmd)
+{
+ return 0;
+}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 2e6000a4eb2c..1462071a19bf 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -170,7 +170,7 @@
#endif
#ifdef CONFIG_SERIAL_EARLYCON
-#define EARLYCON_TABLE() STRUCT_ALIGN(); \
+#define EARLYCON_TABLE() . = ALIGN(8); \
VMLINUX_SYMBOL(__earlycon_table) = .; \
*(__earlycon_table) \
VMLINUX_SYMBOL(__earlycon_table_end) = .;
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 982c299e435a..642d0597bb73 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -74,5 +74,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
+extern bool drm_kms_helper_is_poll_worker(void);
#endif
diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
index 2062c67e2e51..a72db8d23ed6 100644
--- a/include/dt-bindings/clock/mt2701-clk.h
+++ b/include/dt-bindings/clock/mt2701-clk.h
@@ -176,7 +176,8 @@
#define CLK_TOP_AUD_EXT1 156
#define CLK_TOP_AUD_EXT2 157
#define CLK_TOP_NFI1X_PAD 158
-#define CLK_TOP_NR 159
+#define CLK_TOP_AXISEL_D4 159
+#define CLK_TOP_NR 160
/* APMIXEDSYS */
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index 88e64846cf37..cdeafd9cab07 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -81,6 +81,7 @@
#define R8A7794_CLK_SCIF2 19
#define R8A7794_CLK_SCIF1 20
#define R8A7794_CLK_SCIF0 21
+#define R8A7794_CLK_DU1 23
#define R8A7794_CLK_DU0 24
/* MSTP8 */
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
new file mode 100644
index 000000000000..4b1548129fa2
--- /dev/null
+++ b/include/kvm/arm_psci.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __KVM_ARM_PSCI_H__
+#define __KVM_ARM_PSCI_H__
+
+#include <linux/kvm_host.h>
+#include <uapi/linux/psci.h>
+
+#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
+#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
+#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
+
+#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
+
+/*
+ * We need the KVM pointer independently from the vcpu as we can call
+ * this from HYP, and need to apply kern_hyp_va on it...
+ */
+static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
+{
+ /*
+ * Our PSCI implementation stays the same across versions from
+ * v0.2 onward, only adding the few mandatory functions (such
+ * as FEATURES with 1.0) that are required by newer
+ * revisions. It is thus safe to return the latest, unless
+ * userspace has instructed us otherwise.
+ */
+ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+ if (vcpu->kvm->arch.psci_version)
+ return vcpu->kvm->arch.psci_version;
+
+ return KVM_ARM_PSCI_LATEST;
+ }
+
+ return KVM_ARM_PSCI_0_1;
+}
+
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+
+struct kvm_one_reg;
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
+#endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 4c5bca38c653..a031897fca76 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -14,14 +14,16 @@
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
+#include <uapi/linux/const.h>
+
/*
* This file provides common defines for ARM SMC Calling Convention as
* specified in
* http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
*/
-#define ARM_SMCCC_STD_CALL 0
-#define ARM_SMCCC_FAST_CALL 1
+#define ARM_SMCCC_STD_CALL _AC(0,U)
+#define ARM_SMCCC_FAST_CALL _AC(1,U)
#define ARM_SMCCC_TYPE_SHIFT 31
#define ARM_SMCCC_SMC_32 0
@@ -60,6 +62,24 @@
#define ARM_SMCCC_QUIRK_NONE 0
#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
+#define ARM_SMCCC_VERSION_1_0 0x10000
+#define ARM_SMCCC_VERSION_1_1 0x10001
+
+#define ARM_SMCCC_VERSION_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0)
+
+#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 1)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_1 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
+/* SMCCC v1.1 implementation madness follows */
+#ifdef CONFIG_ARM64
+
+#define SMCCC_SMC_INST "smc #0"
+#define SMCCC_HVC_INST "hvc #0"
+
+#elif defined(CONFIG_ARM)
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+#define SMCCC_SMC_INST __SMC(0)
+#define SMCCC_HVC_INST __HVC(0)
+
+#endif
+
+#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+
+#define __count_args(...) \
+ ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __constraint_write_0 \
+ "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_1 \
+ "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_2 \
+ "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
+#define __constraint_write_3 \
+ "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
+#define __constraint_write_4 __constraint_write_3
+#define __constraint_write_5 __constraint_write_4
+#define __constraint_write_6 __constraint_write_5
+#define __constraint_write_7 __constraint_write_6
+
+#define __constraint_read_0
+#define __constraint_read_1
+#define __constraint_read_2
+#define __constraint_read_3
+#define __constraint_read_4 "r" (r4)
+#define __constraint_read_5 __constraint_read_4, "r" (r5)
+#define __constraint_read_6 __constraint_read_5, "r" (r6)
+#define __constraint_read_7 __constraint_read_6, "r" (r7)
+
+#define __declare_arg_0(a0, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register unsigned long r1 asm("r1"); \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3")
+
+#define __declare_arg_1(a0, a1, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register typeof(a1) r1 asm("r1") = a1; \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3")
+
+#define __declare_arg_2(a0, a1, a2, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register typeof(a1) r1 asm("r1") = a1; \
+ register typeof(a2) r2 asm("r2") = a2; \
+ register unsigned long r3 asm("r3")
+
+#define __declare_arg_3(a0, a1, a2, a3, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register typeof(a1) r1 asm("r1") = a1; \
+ register typeof(a2) r2 asm("r2") = a2; \
+ register typeof(a3) r3 asm("r3") = a3
+
+#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+ __declare_arg_3(a0, a1, a2, a3, res); \
+ register typeof(a4) r4 asm("r4") = a4
+
+#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+ __declare_arg_4(a0, a1, a2, a3, a4, res); \
+ register typeof(a5) r5 asm("r5") = a5
+
+#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+ __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
+ register typeof(a6) r6 asm("r6") = a6
+
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
+ register typeof(a7) r7 asm("r7") = a7
+
+#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
+#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
+
+#define ___constraints(count) \
+ : __constraint_write_ ## count \
+ : __constraint_read_ ## count \
+ : "memory"
+#define __constraints(count) ___constraints(count)
+
+/*
+ * We have an output list that is not necessarily used, and GCC feels
+ * entitled to optimise the whole sequence away. "volatile" is what
+ * makes it stick.
+ */
+#define __arm_smccc_1_1(inst, ...) \
+ do { \
+ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+ asm volatile(inst "\n" \
+ __constraints(__count_args(__VA_ARGS__))); \
+ if (___res) \
+ *___res = (typeof(*___res)){r0, r1, r2, r3}; \
+ } while (0)
+
+/*
+ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
+ * The content of the supplied param are copied to registers 0 to 7 prior
+ * to the SMC instruction. The return values are updated with the content
+ * from register 0 to 3 on return from the SMC instruction if not NULL.
+ */
+#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
+
+/*
+ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
+ * The content of the supplied param are copied to registers 0 to 7 prior
+ * to the HVC instruction. The return values are updated with the content
+ * from register 0 to 3 on return from the HVC instruction if not NULL.
+ */
+#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
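
As a usage note for the SMCCC v1.1 macros added above: a caller typically probes ARM_SMCCC_ARCH_FEATURES_FUNC_ID before relying on a workaround. A minimal sketch, assuming an SMC conduit (the real call sites live in the arch code, not in this header):

/* illustrative sketch only -- not part of this patch */
#include <linux/arm-smccc.h>

static bool have_arch_workaround_1(void)
{
	struct arm_smccc_res res;

	/* Ask firmware whether ARCH_WORKAROUND_1 is implemented */
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	/* A negative a0 means "not supported" per SMCCC v1.1 */
	return (int)res.a0 >= 0;
}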
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c357f27d5483..32728ff8095c 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -191,6 +191,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
set_wb_congested(bdi->wb.congested, sync);
}
+struct wb_lock_cookie {
+ bool locked;
+ unsigned long flags;
+};
+
#ifdef CONFIG_CGROUP_WRITEBACK
/**
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 43b93a947e61..63f17b106a4a 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -366,7 +366,7 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
*
* The caller wants to access the wb associated with @inode but isn't
* holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
@@ -374,12 +374,12 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
* association doesn't change until the transaction is finished with
* unlocked_inode_to_wb_end().
*
- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
- * afterwards and can't sleep during transaction. IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction. IRQs may or may not be disabled on
+ * return.
*/
static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
rcu_read_lock();
@@ -387,10 +387,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
* Paired with store_release in inode_switch_wb_work_fn() and
* ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
- *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
- if (unlikely(*lockedp))
- spin_lock_irq(&inode->i_mapping->tree_lock);
+ if (unlikely(cookie->locked))
+ spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
/*
* Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
@@ -402,12 +402,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
/**
* unlocked_inode_to_wb_end - end inode wb access transaction
* @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
*/
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
- if (unlikely(locked))
- spin_unlock_irq(&inode->i_mapping->tree_lock);
+ if (unlikely(cookie->locked))
+ spin_unlock_irqrestore(&inode->i_mapping->tree_lock, cookie->flags);
rcu_read_unlock();
}
@@ -454,12 +455,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
}
static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
return inode_to_wb(inode);
}
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
}
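
The caller pattern for the new cookie-based API mirrors the fs/fs-writeback.c hunk earlier in this diff; a minimal sketch, assuming an inode pointer in hand:

/* illustrative sketch only -- not part of this patch */
struct wb_lock_cookie cookie = {};
struct bdi_writeback *wb;
bool congested;

wb = unlocked_inode_to_wb_begin(inode, &cookie);
/* the inode->wb association is stable here; the caller must not sleep */
congested = wb_congested(wb, cong_bits);
unlocked_inode_to_wb_end(inode, &cookie);

Unlike the old bool, the cookie also carries the saved IRQ flags, which is what allows unlocked_inode_to_wb_begin() to use spin_lock_irqsave() instead of spin_lock_irq().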
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ccd2a5addb56..3281593f6299 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -611,13 +611,13 @@ struct sock_cgroup_data {
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
}
-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index de179993e039..01225b0059b1 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -15,3 +15,8 @@
* with any version that can compile the kernel
*/
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+/* Clang doesn't have a way to turn it off per-function, yet. */
+#ifdef __noretpoline
+#undef __noretpoline
+#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index eb0ed31193a3..a1b1de17455c 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -88,6 +88,10 @@
#define __weak __attribute__((weak))
#define __alias(symbol) __attribute__((alias(#symbol)))
+#ifdef RETPOLINE
+#define __noretpoline __attribute__((indirect_branch("keep")))
+#endif
+
/*
* it doesn't make sense on ARM (currently the only user of __naked)
* to trace naked functions because then mcount is called without
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 2d65bbd6dbd1..18ba29ff1449 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -680,6 +680,11 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
+static inline bool cpumask_available(cpumask_var_t mask)
+{
+ return mask != NULL;
+}
+
#else
typedef struct cpumask cpumask_var_t[1];
@@ -720,6 +725,11 @@ static inline void free_cpumask_var(cpumask_var_t mask)
static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
+
+static inline bool cpumask_available(cpumask_var_t mask)
+{
+ return true;
+}
#endif /* CONFIG_CPUMASK_OFFSTACK */
/* It's common to want to use cpu_all_mask in struct member initializers,
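A short sketch of how cpumask_available() is meant to be consulted before touching a cpumask_var_t that may not have been allocated yet in the CONFIG_CPUMASK_OFFSTACK case (the variable name is illustrative):

	static cpumask_var_t my_cpumask;	/* illustrative */

	if (!cpumask_available(my_cpumask))
		return;				/* zalloc_cpumask_var() not yet called, or it failed */
	cpumask_set_cpu(smp_processor_id(), my_cpumask);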
diff --git a/include/linux/dax.h b/include/linux/dax.h
index add6c4bc568f..ed9cf2f5cd06 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -61,11 +61,6 @@ static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
-static inline bool vma_is_dax(struct vm_area_struct *vma)
-{
- return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
-}
-
static inline bool dax_mapping(struct address_space *mapping)
{
return mapping->host && IS_DAX(mapping->host);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ab1946f4a729..8582de171bce 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -18,6 +18,7 @@
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
+#include <linux/mm_types.h>
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fiemap.h>
@@ -1318,6 +1319,7 @@ struct mm_struct;
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
+#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */
/* sb->s_iflags to limit user namespace mounts */
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
@@ -3033,6 +3035,25 @@ static inline bool io_is_direct(struct file *filp)
return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
}
+static inline bool vma_is_dax(struct vm_area_struct *vma)
+{
+ return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+}
+
+static inline bool vma_is_fsdax(struct vm_area_struct *vma)
+{
+ struct inode *inode;
+
+ if (!vma->vm_file)
+ return false;
+ if (!vma_is_dax(vma))
+ return false;
+ inode = file_inode(vma->vm_file);
+ if (S_ISCHR(inode->i_mode))
+ return false; /* device-dax */
+ return true;
+}
+
static inline int iocb_flags(struct file *file)
{
int res = 0;
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index c332f0a45607..3fdfede2f0f3 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -734,11 +734,7 @@ struct fsl_ifc_nand {
u32 res19[0x10];
__be32 nand_fsr;
u32 res20;
- /* The V1 nand_eccstat is actually 4 words that overlaps the
- * V2 nand_eccstat.
- */
- __be32 v1_nand_eccstat[2];
- __be32 v2_nand_eccstat[6];
+ __be32 nand_eccstat[8];
u32 res21[0x1c];
__be32 nanndcr;
u32 res22[0x2];
diff --git a/include/linux/hid.h b/include/linux/hid.h
index b2ec82712baa..fab65b61d6d4 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -801,7 +801,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
+int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
@@ -1106,13 +1106,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
*
 * @report: the report whose length we want to know
*/
-static inline int hid_report_len(struct hid_report *report)
+static inline u32 hid_report_len(struct hid_report *report)
{
/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
return ((report->size - 1) >> 3) + 1 + (report->id > 0);
}
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
/* HID quirks API */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 8feecd5345e7..7e39719e27cb 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -600,7 +600,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
* Returns true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
@@ -610,6 +610,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
if (likely(!eth_type_vlan(protocol)))
return false;
+ if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+ return false;
+
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
@@ -627,7 +630,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
*
* Returns features without unsafe ones if the skb has multiple tags.
*/
-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb)) {
diff --git a/include/linux/init.h b/include/linux/init.h
index 8e346d1bd837..0cca4142987f 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -5,10 +5,10 @@
#include <linux/types.h>
/* Built-in __init functions needn't be compiled with retpoline */
-#if defined(RETPOLINE) && !defined(MODULE)
-#define __noretpoline __attribute__((indirect_branch("keep")))
+#if defined(__noretpoline) && !defined(MODULE)
+#define __noinitretpoline __noretpoline
#else
-#define __noretpoline
+#define __noinitretpoline
#endif
/* These macros are used to mark some functions or
@@ -46,7 +46,7 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold notrace __latent_entropy __noretpoline
+#define __init __section(.init.text) __cold notrace __latent_entropy __noinitretpoline
#define __initdata __section(.init.data)
#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
@@ -133,6 +133,9 @@ void prepare_namespace(void);
void __init load_default_modules(void);
int __init init_rootfs(void);
+#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX)
+extern bool rodata_enabled;
+#endif
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 589d14e970ad..c2a0f0072274 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H
+#include <linux/cache.h>
#include <linux/math64.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -63,19 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate);
/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-/* some arch's have a small-data section that can be accessed register-relative
- * but that can only take up to, say, 4-byte variables. jiffies being part of
- * an 8-byte variable may not be correctly accessed unless we force the issue
- */
-#define __jiffy_data __attribute__((section(".data")))
+#ifndef __jiffy_arch_data
+#define __jiffy_arch_data
+#endif
/*
* The 64-bit value is not atomic - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
*/
-extern u64 __jiffy_data jiffies_64;
-extern unsigned long volatile __jiffy_data jiffies;
+extern u64 __cacheline_aligned_in_smp jiffies_64;
+extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7894d55e4998..f75aa98f3d2c 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -46,6 +46,7 @@
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
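ALIGN_DOWN() mirrors ALIGN() but rounds toward zero for a power-of-two alignment; a few worked values (illustrative only):

	ALIGN(0x1234, 0x100)		/* == 0x1300 */
	ALIGN_DOWN(0x1234, 0x100)	/* == 0x1200 */
	ALIGN_DOWN(0x1200, 0x100)	/* == 0x1200, already aligned */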
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fd4ca0b4fe0f..ac6796138ba0 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -88,6 +88,23 @@ static inline void init_llist_head(struct llist_head *list)
container_of(ptr, type, member)
/**
+ * member_address_is_nonnull - check whether the member address is not NULL
+ * @ptr: the object pointer (struct type * that contains the llist_node)
+ * @member: the name of the llist_node within the struct.
+ *
+ * This macro is conceptually the same as
+ * &ptr->member != NULL
+ * but it works around the fact that compilers can decide that taking a member
+ * address is never a NULL pointer.
+ *
+ * Real objects that start at a high address and have a member at NULL are
+ * unlikely to exist, but such pointers may be returned e.g. by the
+ * container_of() macro.
+ */
+#define member_address_is_nonnull(ptr, member) \
+ ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)
+
+/**
* llist_for_each - iterate over some deleted entries of a lock-less list
* @pos: the &struct llist_node to use as a loop cursor
* @node: the first entry of deleted list entries
@@ -121,7 +138,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
#define llist_for_each_entry(pos, node, member) \
for ((pos) = llist_entry((node), typeof(*(pos)), member); \
- &(pos)->member != NULL; \
+ member_address_is_nonnull(pos, member); \
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
@@ -143,7 +160,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
#define llist_for_each_entry_safe(pos, n, node, member) \
for (pos = llist_entry((node), typeof(*pos), member); \
- &pos->member != NULL && \
+ member_address_is_nonnull(pos, member) && \
(n = llist_entry(pos->member.next, typeof(*n), member), true); \
pos = n)
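A usage sketch for the reworked iterators, assuming a hypothetical node type that embeds a struct llist_node and a list head named my_llist_head:

	struct my_node {			/* hypothetical */
		int value;
		struct llist_node llnode;
	};

	struct llist_node *batch = llist_del_all(&my_llist_head);
	struct my_node *pos, *n;

	llist_for_each_entry_safe(pos, n, batch, llnode)
		kfree(pos);			/* next entry is sampled before pos is freed */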
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b4ee8f62ce8d..8e2828d48d7f 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -470,6 +470,7 @@ struct mlx4_update_qp_params {
u16 rate_val;
};
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 58276144ba81..5f3e62253e2f 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -750,8 +750,14 @@ enum {
};
enum {
- CQE_RSS_HTYPE_IP = 0x3 << 6,
- CQE_RSS_HTYPE_L4 = 0x3 << 2,
+ CQE_RSS_HTYPE_IP = 0x3 << 2,
+ /* cqe->rss_hash_type[3:2] - IP destination selected for hash
+ * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
+ */
+ CQE_RSS_HTYPE_L4 = 0x3 << 6,
+ /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
+ * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
+ */
};
enum {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 6a620e01b040..7751d72a6a40 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -380,8 +380,8 @@ struct mlx5_core_srq {
struct mlx5_core_rsc_common common; /* must be first */
u32 srqn;
int max;
- int max_gs;
- int max_avail_gather;
+ size_t max_gs;
+ size_t max_avail_gather;
int wqe_shift;
void (*event) (struct mlx5_core_srq *, enum mlx5_event);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2217e2f18247..493d07931ea5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
#endif
+#ifndef lm_alias
+#define lm_alias(x) __va(__pa_symbol(x))
+#endif
+
/*
* To prevent common memory management code establishing
* a zero page mapping on a read fault.
@@ -1288,6 +1292,19 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
struct page **pages, unsigned int gup_flags);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
+#ifdef CONFIG_FS_DAX
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas);
+#else
+static inline long get_user_pages_longterm(unsigned long start,
+ unsigned long nr_pages, unsigned int gup_flags,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+}
+#endif /* CONFIG_FS_DAX */
+
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
@@ -2229,6 +2246,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
#define FOLL_COW 0x4000 /* internal GUP flag */
+#define FOLL_ANON 0x8000 /* don't do file mappings */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
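A sketch of how a long-lived pinning path might call the helper declared above (error handling trimmed; start, npages and the pages array are assumed to be set up by the caller):

	long pinned;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages_longterm(start, npages, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (pinned < 0)
		return pinned;		/* FS-DAX mappings may be rejected here */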
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa457febd..3529683f691e 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@ struct flchip {
unsigned int write_suspended:1;
unsigned int erase_suspended:1;
unsigned long in_progress_block_addr;
+ unsigned long in_progress_block_mask;
struct mutex mutex;
wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index b4d10155af54..e64c02af5894 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -255,6 +255,8 @@ unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
unsigned int target, unsigned int size);
+int xt_check_proc_name(const char *name, unsigned int size);
+
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
@@ -382,38 +384,14 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
return ret;
}
+struct xt_percpu_counter_alloc_state {
+ unsigned int off;
+ const char __percpu *mem;
+};
-/* On SMP, ip(6)t_entry->counters.pcnt holds address of the
- * real (percpu) counter. On !SMP, its just the packet count,
- * so nothing needs to be done there.
- *
- * xt_percpu_counter_alloc returns the address of the percpu
- * counter, or 0 on !SMP. We force an alignment of 16 bytes
- * so that bytes/packets share a common cache line.
- *
- * Hence caller must use IS_ERR_VALUE to check for error, this
- * allows us to return 0 for single core systems without forcing
- * callers to deal with SMP vs. NONSMP issues.
- */
-static inline unsigned long xt_percpu_counter_alloc(void)
-{
- if (nr_cpu_ids > 1) {
- void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
- sizeof(struct xt_counters));
-
- if (res == NULL)
- return -ENOMEM;
-
- return (__force unsigned long) res;
- }
-
- return 0;
-}
-static inline void xt_percpu_counter_free(u64 pcnt)
-{
- if (nr_cpu_ids > 1)
- free_percpu((void __percpu *) (unsigned long) pcnt);
-}
+bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
+ struct xt_counters *counter);
+void xt_percpu_counter_free(struct xt_counters *cnt);
static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
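A sketch of how a rule-checking loop might use the new state-based percpu counter allocator (the entry pointer e is illustrative):

	struct xt_percpu_counter_alloc_state state = {};

	/* for each entry e being checked: */
	if (!xt_percpu_counter_alloc(&state, &e->counters))
		return -ENOMEM;

	/* and on the cleanup/error path: */
	xt_percpu_counter_free(&e->counters);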
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index fbc98e2c8228..e791ebc65c9c 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -5,6 +5,7 @@
#ifndef _LINUX_NOSPEC_H
#define _LINUX_NOSPEC_H
+#include <asm/barrier.h>
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
@@ -30,26 +31,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#endif
/*
- * Warn developers about inappropriate array_index_nospec() usage.
- *
- * Even if the CPU speculates past the WARN_ONCE branch, the
- * sign bit of @index is taken into account when generating the
- * mask.
- *
- * This warning is compiled out when the compiler can infer that
- * @index and @size are less than LONG_MAX.
- */
-#define array_index_mask_nospec_check(index, size) \
-({ \
- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
- "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
- _mask = 0; \
- else \
- _mask = array_index_mask_nospec(index, size); \
- _mask; \
-})
-
-/*
* array_index_nospec - sanitize an array index after a bounds check
*
* For a code sequence like:
@@ -67,12 +48,11 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
({ \
typeof(index) _i = (index); \
typeof(size) _s = (size); \
- unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
+ unsigned long _mask = array_index_mask_nospec(_i, _s); \
\
BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
\
- _i &= _mask; \
- _i; \
+ (typeof(_i)) (_i & _mask); \
})
#endif /* _LINUX_NOSPEC_H */
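The pattern the macro is documented for, sketched here: an ordinary bounds check followed by sanitizing the index so it stays in range even under speculation:

	if (index >= size)
		return -EINVAL;
	index = array_index_nospec(index, size);
	val = array[index];		/* index is clamped even speculatively */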
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7dbe9148b2f8..35f4c4d9c405 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -148,7 +148,7 @@ static inline int page_cache_get_speculative(struct page *page)
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic());
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
/*
* Preempt must be disabled here - we rely on rcu_read_lock doing
@@ -186,7 +186,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic());
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
VM_BUG_ON_PAGE(page_count(page) == 0, page);
page_ref_add(page, count);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1b711796d989..36522905685b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1348,9 +1348,9 @@ static inline int pci_alloc_irq_vectors(struct pci_dev *dev,
unsigned int min_vecs, unsigned int max_vecs,
unsigned int flags)
{
- if (min_vecs > 1)
- return -EINVAL;
- return 1;
+ if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
+ return 1;
+ return -ENOSPC;
}
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h
index 1419133fa69e..4ac1a070af0a 100644
--- a/include/linux/platform_data/isl9305.h
+++ b/include/linux/platform_data/isl9305.h
@@ -24,7 +24,7 @@
struct regulator_init_data;
struct isl9305_pdata {
- struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR];
+ struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1];
};
#endif
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 34c4498b800f..83b22ae9ae12 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -59,23 +59,23 @@ struct posix_clock_operations {
int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
- int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts);
+ int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts);
- int (*clock_getres) (struct posix_clock *pc, struct timespec *ts);
+ int (*clock_getres) (struct posix_clock *pc, struct timespec64 *ts);
int (*clock_settime)(struct posix_clock *pc,
- const struct timespec *ts);
+ const struct timespec64 *ts);
int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit);
int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit);
void (*timer_gettime)(struct posix_clock *pc,
- struct k_itimer *kit, struct itimerspec *tsp);
+ struct k_itimer *kit, struct itimerspec64 *tsp);
int (*timer_settime)(struct posix_clock *pc,
struct k_itimer *kit, int flags,
- struct itimerspec *tsp, struct itimerspec *old);
+ struct itimerspec64 *tsp, struct itimerspec64 *old);
/*
* Optional character device methods:
*/
diff --git a/include/linux/psci.h b/include/linux/psci.h
index bdea1cb5e1db..347077cf19c6 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -25,7 +25,19 @@ bool psci_tos_resident_on(int cpu);
int psci_cpu_init_idle(unsigned int cpu);
int psci_cpu_suspend_enter(unsigned long index);
+enum psci_conduit {
+ PSCI_CONDUIT_NONE,
+ PSCI_CONDUIT_SMC,
+ PSCI_CONDUIT_HVC,
+};
+
+enum smccc_version {
+ SMCCC_VERSION_1_0,
+ SMCCC_VERSION_1_1,
+};
+
struct psci_operations {
+ u32 (*get_version)(void);
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
@@ -33,6 +45,8 @@ struct psci_operations {
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
+ enum psci_conduit conduit;
+ enum smccc_version smccc_version;
};
extern struct psci_operations psci_ops;
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 37b532410528..3c3786df044c 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -425,6 +425,8 @@ struct regulator_dev {
struct regulator_enable_gpio *ena_pin;
unsigned int ena_gpio_state:1;
+ unsigned int is_switch:1;
+
/* time when this regulator was disabled last time */
unsigned long last_off_jiffy;
};
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5c132d3188be..85d1ffc90285 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -706,8 +706,10 @@ slow_path:
if (!key ||
(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }
data = rht_obj(ht, head);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e775696b480a..540aeda9e5b0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1436,6 +1436,7 @@ struct sched_dl_entity {
u64 dl_deadline; /* relative deadline of each instance */
u64 dl_period; /* separation of two instances (period) */
u64 dl_bw; /* dl_runtime / dl_deadline */
+ u64 dl_density; /* dl_runtime / dl_deadline */
/*
* Actual scheduling parameters. Initialized with the values above,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 344201437017..7b16c5322673 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -347,10 +347,10 @@ struct earlycon_id {
char name[16];
char compatible[128];
int (*setup)(struct earlycon_device *, const char *options);
-} __aligned(32);
+};
-extern const struct earlycon_id __earlycon_table[];
-extern const struct earlycon_id __earlycon_table_end[];
+extern const struct earlycon_id *__earlycon_table[];
+extern const struct earlycon_id *__earlycon_table_end[];
#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
#define EARLYCON_USED_OR_UNUSED __used
@@ -358,12 +358,19 @@ extern const struct earlycon_id __earlycon_table_end[];
#define EARLYCON_USED_OR_UNUSED __maybe_unused
#endif
-#define OF_EARLYCON_DECLARE(_name, compat, fn) \
- static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
- EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \
+#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \
+ static const struct earlycon_id unique_id \
+ EARLYCON_USED_OR_UNUSED __initconst \
= { .name = __stringify(_name), \
.compatible = compat, \
- .setup = fn }
+ .setup = fn }; \
+ static const struct earlycon_id EARLYCON_USED_OR_UNUSED \
+ __section(__earlycon_table) \
+ * const __PASTE(__p, unique_id) = &unique_id
+
+#define OF_EARLYCON_DECLARE(_name, compat, fn) \
+ _OF_EARLYCON_DECLARE(_name, compat, fn, \
+ __UNIQUE_ID(__earlycon_##_name))
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
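Driver-side usage is unchanged by the table rework above; a hedged example with made-up device names:

	static int __init myuart_early_setup(struct earlycon_device *dev,
					     const char *opt)
	{
		/* map registers from dev->port, set dev->con->write, ... */
		return 0;
	}
	OF_EARLYCON_DECLARE(myuart, "acme,myuart", myuart_early_setup);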
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dca387a8fa6b..09883c2a3164 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -985,10 +985,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
-int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
- int offset, int len);
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
- int len);
+int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len);
+int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) consume_skb(a)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index a41244fe58d0..fe1b8623a3a1 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -355,6 +355,7 @@ struct tty_file_private {
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
#define TTY_HUPPED 18 /* Post driver->hangup() */
+#define TTY_HUPPING 19 /* Hangup in progress */
#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
/* Values for tty->flow_change */
@@ -656,7 +657,7 @@ extern int tty_unregister_ldisc(int disc);
extern int tty_set_ldisc(struct tty_struct *tty, int disc);
extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
extern void tty_ldisc_release(struct tty_struct *tty);
-extern void tty_ldisc_init(struct tty_struct *tty);
+extern int __must_check tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
char *f, int count);
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index de2a722fe3cf..ea4f81c2a6d5 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -56,4 +56,7 @@
*/
#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
+/* Device needs a pause after every control message. */
+#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d5eb5479a425..3f8f35053260 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -143,6 +143,9 @@ int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
+#define virtio_device_for_each_vq(vdev, vq) \
+ list_for_each_entry(vq, &vdev->vqs, list)
+
/**
* virtio_driver - operations for a virtio I/O driver
* @driver: underlying device driver (populate name and owner).
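A short sketch of the new iterator, with vdev a struct virtio_device and vq the struct virtqueue loop cursor:

	struct virtqueue *vq;

	virtio_device_for_each_vq(vdev, vq)
		virtqueue_disable_cb(vq);	/* e.g. quiesce every queue */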
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 1061add575d2..1def337b16d4 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -453,6 +453,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
+extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 554671c81f4a..4931787193c3 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -893,7 +893,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u16 conn_timeout);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role);
+ u8 role, bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
diff --git a/include/net/bonding.h b/include/net/bonding.h
index f32f7ef8a23a..7734cc9c7d29 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -197,6 +197,7 @@ struct bonding {
struct slave __rcu *primary_slave;
struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
bool force_primary;
+ u32 nest_level;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
struct slave *);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 66167138120a..9d57639223c3 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -947,9 +947,9 @@ enum rate_info_flags {
* @RATE_INFO_BW_160: 160 MHz bandwidth
*/
enum rate_info_bw {
+ RATE_INFO_BW_20 = 0,
RATE_INFO_BW_5,
RATE_INFO_BW_10,
- RATE_INFO_BW_20,
RATE_INFO_BW_40,
RATE_INFO_BW_80,
RATE_INFO_BW_160,
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index c9b3eb70f340..567017b5fc9e 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -55,6 +55,7 @@ struct inet_timewait_sock {
#define tw_family __tw_common.skc_family
#define tw_state __tw_common.skc_state
#define tw_reuse __tw_common.skc_reuse
+#define tw_reuseport __tw_common.skc_reuseport
#define tw_ipv6only __tw_common.skc_ipv6only
#define tw_bound_dev_if __tw_common.skc_bound_dev_if
#define tw_node __tw_common.skc_nulls_node
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index fe994d2e5286..ea985aa7a6c5 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
+void llc_sk_stop_all_timers(struct sock *sk, bool sync);
void llc_sk_free(struct sock *sk);
void llc_sk_reset(struct sock *sk);
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 3334dbfa5aa4..7fc78663ec9d 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -6,7 +6,7 @@
static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
{
- return remaining >= sizeof(*rtnh) &&
+ return remaining >= (int)sizeof(*rtnh) &&
rtnh->rtnh_len >= sizeof(*rtnh) &&
rtnh->rtnh_len <= remaining;
}
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 5d2c9b89c168..1f49824f9fc8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -690,6 +690,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
*to_free = skb;
}
+static inline void __qdisc_drop_all(struct sk_buff *skb,
+ struct sk_buff **to_free)
+{
+ if (skb->prev)
+ skb->prev->next = *to_free;
+ else
+ skb->next = *to_free;
+ *to_free = skb;
+}
+
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct qdisc_skb_head *qh,
struct sk_buff **to_free)
@@ -810,6 +820,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_DROP;
}
+static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop_all(skb, to_free);
+ qdisc_qstats_drop(sch);
+
+ return NET_XMIT_DROP;
+}
+
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
long it will take to send a packet given its size.
*/
diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
index 8716d5942b65..8fcf8908a694 100644
--- a/include/net/slhc_vj.h
+++ b/include/net/slhc_vj.h
@@ -127,6 +127,7 @@ typedef __u32 int32;
*/
struct cstate {
byte_t cs_this; /* connection id number (xmit) */
+ bool initialized; /* true if initialized */
struct cstate *next; /* next in ring (xmit) */
struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
struct tcphdr cs_tcp;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index caf35e062639..18f029bcb8c7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1265,9 +1265,11 @@ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
static inline int tcp_win_from_space(int space)
{
- return sysctl_tcp_adv_win_scale<=0 ?
- (space>>(-sysctl_tcp_adv_win_scale)) :
- space - (space>>sysctl_tcp_adv_win_scale);
+ int tcp_adv_win_scale = sysctl_tcp_adv_win_scale;
+
+ return tcp_adv_win_scale <= 0 ?
+ (space>>(-tcp_adv_win_scale)) :
+ space - (space>>tcp_adv_win_scale);
}
/* Note: caller must be prepared to deal with negative returns */
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 80761938b9a7..8228155b305e 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -62,6 +62,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
UDP_SKB_CB(skb)->cscov = cscov;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_valid = 0;
}
return 0;
diff --git a/include/net/x25.h b/include/net/x25.h
index c383aa4edbf0..6d30a01d281d 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *);
/* sysctl_net_x25.c */
#ifdef CONFIG_SYSCTL
-void x25_register_sysctl(void);
+int x25_register_sysctl(void);
void x25_unregister_sysctl(void);
#else
-static inline void x25_register_sysctl(void) {};
+static inline int x25_register_sysctl(void) { return 0; };
static inline void x25_unregister_sysctl(void) {};
#endif /* CONFIG_SYSCTL */
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 818a38f99221..f888263fd757 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -129,6 +129,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
const unsigned char *dst_dev_addr);
int rdma_addr_size(struct sockaddr *addr);
+int rdma_addr_size_in6(struct sockaddr_in6 *addr);
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index 70339d7958c0..226f915a68c2 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -243,6 +243,7 @@ static inline int qe_alive_during_sleep(void)
#define qe_muram_free cpm_muram_free
#define qe_muram_addr cpm_muram_addr
#define qe_muram_offset cpm_muram_offset
+#define qe_muram_dma cpm_muram_dma
#define qe_setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
#define qe_clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
@@ -667,6 +668,10 @@ struct ucc_slow_pram {
#define UCC_FAST_GUMR_CTSS 0x00800000
#define UCC_FAST_GUMR_TXSY 0x00020000
#define UCC_FAST_GUMR_RSYN 0x00010000
+#define UCC_FAST_GUMR_SYNL_MASK 0x0000C000
+#define UCC_FAST_GUMR_SYNL_16 0x0000C000
+#define UCC_FAST_GUMR_SYNL_8 0x00008000
+#define UCC_FAST_GUMR_SYNL_AUTO 0x00004000
#define UCC_FAST_GUMR_RTSM 0x00002000
#define UCC_FAST_GUMR_REVD 0x00000400
#define UCC_FAST_GUMR_ENR 0x00000020
diff --git a/include/sound/control.h b/include/sound/control.h
index 21d047f229a1..4142757080f8 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -22,6 +22,7 @@
*
*/
+#include <linux/nospec.h>
#include <sound/asound.h>
#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
@@ -147,12 +148,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
- return id->numid - kctl->id.numid;
+ unsigned int ioff = id->numid - kctl->id.numid;
+ return array_index_nospec(ioff, kctl->count);
}
static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
- return id->index - kctl->id.index;
+ unsigned int ioff = id->index - kctl->id.index;
+ return array_index_nospec(ioff, kctl->count);
}
static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
index 760c969d885d..12bbf8c81112 100644
--- a/include/sound/pcm_oss.h
+++ b/include/sound/pcm_oss.h
@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
char *buffer; /* vmallocated period */
size_t buffer_used; /* used length from period buffer */
struct mutex params_lock;
+ atomic_t rw_ref; /* concurrent read/write accesses */
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
struct snd_pcm_plugin *plugin_first;
struct snd_pcm_plugin *plugin_last;
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index bc96b14dfb2c..f4d5c998cc2b 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -40,7 +40,7 @@
#define EPOLLRDHUP 0x00002000
/* Set exclusive wakeup mode for the target file descriptor */
-#define EPOLLEXCLUSIVE (1 << 28)
+#define EPOLLEXCLUSIVE (1U << 28)
/*
* Request the handling of system wakeup events so as to prevent system suspends
@@ -52,13 +52,13 @@
*
* Requires CAP_BLOCK_SUSPEND
*/
-#define EPOLLWAKEUP (1 << 29)
+#define EPOLLWAKEUP (1U << 29)
/* Set the One Shot behaviour for the target file descriptor */
-#define EPOLLONESHOT (1 << 30)
+#define EPOLLONESHOT (1U << 30)
/* Set the Edge Triggered behaviour for the target file descriptor */
-#define EPOLLET (1 << 31)
+#define EPOLLET (1U << 31)
/*
* On x86-64 make the 64bit structure have the same alignment as the
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4ee67cb99143..05b9bb63dbec 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -870,6 +870,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_S390_USER_INSTR0 130
#define KVM_CAP_MSI_DEVID 131
#define KVM_CAP_PPC_HTM 132
+#define KVM_CAP_S390_BPB 152
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 44b8a6bd5fe1..774cb2db1b89 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -167,46 +167,99 @@
/* If we did not see any headers from any supported C libraries,
* or we are being included in the kernel, then define everything
- * that we need. */
+ * that we need. Check for previous __UAPI_* definitions to give
+ * unsupported C libraries a way to opt out of any kernel definition. */
#else /* !defined(__GLIBC__) */
/* Definitions for if.h */
+#ifndef __UAPI_DEF_IF_IFCONF
#define __UAPI_DEF_IF_IFCONF 1
+#endif
+#ifndef __UAPI_DEF_IF_IFMAP
#define __UAPI_DEF_IF_IFMAP 1
+#endif
+#ifndef __UAPI_DEF_IF_IFNAMSIZ
#define __UAPI_DEF_IF_IFNAMSIZ 1
+#endif
+#ifndef __UAPI_DEF_IF_IFREQ
#define __UAPI_DEF_IF_IFREQ 1
+#endif
/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS
#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+#endif
/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif
/* Definitions for in.h */
+#ifndef __UAPI_DEF_IN_ADDR
#define __UAPI_DEF_IN_ADDR 1
+#endif
+#ifndef __UAPI_DEF_IN_IPPROTO
#define __UAPI_DEF_IN_IPPROTO 1
+#endif
+#ifndef __UAPI_DEF_IN_PKTINFO
#define __UAPI_DEF_IN_PKTINFO 1
+#endif
+#ifndef __UAPI_DEF_IP_MREQ
#define __UAPI_DEF_IP_MREQ 1
+#endif
+#ifndef __UAPI_DEF_SOCKADDR_IN
#define __UAPI_DEF_SOCKADDR_IN 1
+#endif
+#ifndef __UAPI_DEF_IN_CLASS
#define __UAPI_DEF_IN_CLASS 1
+#endif
/* Definitions for in6.h */
+#ifndef __UAPI_DEF_IN6_ADDR
#define __UAPI_DEF_IN6_ADDR 1
+#endif
+#ifndef __UAPI_DEF_IN6_ADDR_ALT
#define __UAPI_DEF_IN6_ADDR_ALT 1
+#endif
+#ifndef __UAPI_DEF_SOCKADDR_IN6
#define __UAPI_DEF_SOCKADDR_IN6 1
+#endif
+#ifndef __UAPI_DEF_IPV6_MREQ
#define __UAPI_DEF_IPV6_MREQ 1
+#endif
+#ifndef __UAPI_DEF_IPPROTO_V6
#define __UAPI_DEF_IPPROTO_V6 1
+#endif
+#ifndef __UAPI_DEF_IPV6_OPTIONS
#define __UAPI_DEF_IPV6_OPTIONS 1
+#endif
+#ifndef __UAPI_DEF_IN6_PKTINFO
#define __UAPI_DEF_IN6_PKTINFO 1
+#endif
+#ifndef __UAPI_DEF_IP6_MTUINFO
#define __UAPI_DEF_IP6_MTUINFO 1
+#endif
/* Definitions for ipx.h */
+#ifndef __UAPI_DEF_SOCKADDR_IPX
#define __UAPI_DEF_SOCKADDR_IPX 1
+#endif
+#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
+#endif
+#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
+#endif
+#ifndef __UAPI_DEF_IPX_CONFIG_DATA
#define __UAPI_DEF_IPX_CONFIG_DATA 1
+#endif
+#ifndef __UAPI_DEF_IPX_ROUTE_DEF
#define __UAPI_DEF_IPX_ROUTE_DEF 1
+#endif
/* Definitions for xattr.h */
+#ifndef __UAPI_DEF_XATTR
#define __UAPI_DEF_XATTR 1
+#endif
#endif /* __GLIBC__ */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index e5a2e68b2236..ecc8e01c5616 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -106,7 +106,7 @@
#define PCI_SUBSYSTEM_ID 0x2e
#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
#define PCI_ROM_ADDRESS_ENABLE 0x01
-#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
+#define PCI_ROM_ADDRESS_MASK (~0x7ffU)
#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index 3d7a0fc021a7..39930ca998cd 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -87,6 +87,9 @@
(((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
#define PSCI_VERSION_MINOR(ver) \
((ver) & PSCI_VERSION_MINOR_MASK)
+#define PSCI_VERSION(maj, min) \
+ ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
+ ((min) & PSCI_VERSION_MINOR_MASK))
/* PSCI features decoding (>=1.0) */
#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1
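Worked values for the new composition macro, assuming the usual 16-bit major/minor split defined by PSCI_VERSION_MAJOR_SHIFT and the masks above:

	PSCI_VERSION(1, 0)			/* == 0x00010000 */
	PSCI_VERSION(1, 1)			/* == 0x00010001 */
	PSCI_VERSION_MAJOR(PSCI_VERSION(1, 1))	/* == 1 */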
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index 3f93d1695e7f..b455b0d86f26 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -34,6 +34,9 @@
/* Clear the entropy pool and associated counters. (Superuser only.) */
#define RNDCLEARPOOL _IO( 'R', 0x06 )
+/* Reseed CRNG. (Superuser only.) */
+#define RNDRESEEDCRNG _IO( 'R', 0x07 )
+
struct rand_pool_info {
int entropy_count;
int buf_size;
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index d2314be4f0c0..19f9dc2c06f6 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -369,7 +369,7 @@ static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_d
{
return (protocol == UAC_VERSION_1) ?
desc->baSourceID[desc->bNrInPins + 4] :
- desc->baSourceID[desc->bNrInPins + 6];
+ 2; /* in UAC2, this value is constant */
}
static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
@@ -377,7 +377,7 @@ static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_de
{
return (protocol == UAC_VERSION_1) ?
&desc->baSourceID[desc->bNrInPins + 5] :
- &desc->baSourceID[desc->bNrInPins + 7];
+ &desc->baSourceID[desc->bNrInPins + 6];
}
static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
diff --git a/init/main.c b/init/main.c
index 48ffaaad8ac9..8ac3d15ddc1a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -81,6 +81,7 @@
#include <linux/proc_ns.h>
#include <linux/io.h>
#include <linux/kaiser.h>
+#include <linux/cache.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -915,14 +916,16 @@ static int try_to_run_init_process(const char *init_filename)
static noinline void __init kernel_init_freeable(void);
-#ifdef CONFIG_DEBUG_RODATA
-static bool rodata_enabled = true;
+#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_SET_MODULE_RONX)
+bool rodata_enabled __ro_after_init = true;
static int __init set_debug_rodata(char *str)
{
return strtobool(str, &rodata_enabled);
}
__setup("rodata=", set_debug_rodata);
+#endif
+#ifdef CONFIG_DEBUG_RODATA
static void mark_readonly(void)
{
if (rodata_enabled)
diff --git a/ipc/shm.c b/ipc/shm.c
index e2072ae4f90e..b626745e771c 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -198,6 +198,12 @@ static int __shm_open(struct vm_area_struct *vma)
if (IS_ERR(shp))
return PTR_ERR(shp);
+ if (shp->shm_file != sfd->file) {
+ /* ID was reused */
+ shm_unlock(shp);
+ return -EINVAL;
+ }
+
shp->shm_atim = get_seconds();
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_nattch++;
@@ -381,6 +387,17 @@ static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return sfd->vm_ops->fault(vma, vmf);
}
+static int shm_split(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct file *file = vma->vm_file;
+ struct shm_file_data *sfd = shm_file_data(file);
+
+ if (sfd->vm_ops && sfd->vm_ops->split)
+ return sfd->vm_ops->split(vma, addr);
+
+ return 0;
+}
+
#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
@@ -414,8 +431,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
int ret;
/*
- * In case of remap_file_pages() emulation, the file can represent
- * removed IPC ID: propogate shm_lock() error to caller.
+ * In case of remap_file_pages() emulation, the file can represent an
+ * IPC ID that was removed, and possibly even reused by another shm
+ * segment already. Propagate this case as an error to the caller.
*/
ret =__shm_open(vma);
if (ret)
@@ -439,6 +457,7 @@ static int shm_release(struct inode *ino, struct file *file)
struct shm_file_data *sfd = shm_file_data(file);
put_ipc_ns(sfd->ns);
+ fput(sfd->file);
shm_file_data(file) = NULL;
kfree(sfd);
return 0;
@@ -503,6 +522,7 @@ static const struct vm_operations_struct shm_vm_ops = {
.open = shm_open, /* callback for a new vm-area open */
.close = shm_close, /* callback for when the vm-area is released */
.fault = shm_fault,
+ .split = shm_split,
#if defined(CONFIG_NUMA)
.set_policy = shm_set_policy,
.get_policy = shm_get_policy,
@@ -1200,7 +1220,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
file->f_mapping = shp->shm_file->f_mapping;
sfd->id = shp->shm_perm.id;
sfd->ns = get_ipc_ns(ns);
- sfd->file = shp->shm_file;
+ /*
+ * We need to take a reference to the real shm file to prevent the
+ * pointer from becoming stale in cases where the lifetime of the outer
+ * file extends beyond that of the shm segment. It's not usually
+ * possible, but it can happen during remap_file_pages() emulation as
+ * that unmaps the memory, then does ->mmap() via file reference only.
+ * We'll deny the ->mmap() if the shm segment was since removed, but to
+ * detect shm ID reuse we need to compare the file pointers.
+ */
+ sfd->file = get_file(shp->shm_file);
sfd->vm_ops = NULL;
err = security_mmap_file(file, prot, flags);
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 9a1e6ed7babc..eb43f7e219f9 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -20,8 +20,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
{
int i;
- for (i = 0; i < array->map.max_entries; i++)
+ for (i = 0; i < array->map.max_entries; i++) {
free_percpu(array->pptrs[i]);
+ cond_resched();
+ }
}
static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -37,6 +39,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
return -ENOMEM;
}
array->pptrs[i] = ptr;
+ cond_resched();
}
return 0;
@@ -48,8 +51,9 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
u32 elem_size, index_mask, max_entries;
bool unpriv = !capable(CAP_SYS_ADMIN);
+ u64 cost, array_size, mask64;
struct bpf_array *array;
- u64 array_size, mask64;
+ int ret;
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -92,8 +96,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
array_size += (u64) max_entries * elem_size;
/* make sure there is no u32 overflow later in round_up() */
- if (array_size >= U32_MAX - PAGE_SIZE)
+ cost = array_size;
+ if (cost >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-ENOMEM);
+ if (percpu) {
+ cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+ if (cost >= U32_MAX - PAGE_SIZE)
+ return ERR_PTR(-ENOMEM);
+ }
+ cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+ ret = bpf_map_precharge_memlock(cost);
+ if (ret < 0)
+ return ERR_PTR(ret);
/* allocate all map elements and zero-initialize them */
array = bpf_map_area_alloc(array_size);
@@ -107,20 +122,16 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
array->map.key_size = attr->key_size;
array->map.value_size = attr->value_size;
array->map.max_entries = attr->max_entries;
+ array->map.map_flags = attr->map_flags;
+ array->map.pages = cost;
array->elem_size = elem_size;
- if (!percpu)
- goto out;
-
- array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
- if (array_size >= U32_MAX - PAGE_SIZE ||
- elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
+ if (percpu &&
+ (elem_size > PCPU_MIN_UNIT_SIZE ||
+ bpf_array_alloc_percpu(array))) {
bpf_map_area_free(array);
return ERR_PTR(-ENOMEM);
}
-out:
- array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
return &array->map;
}
@@ -179,7 +190,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
- u32 index = *(u32 *)key;
+ u32 index = key ? *(u32 *)key : U32_MAX;
u32 *next = (u32 *)next_key;
if (index >= array->map.max_entries) {
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index ad2f0ed75471..a36a532c056d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -326,12 +326,15 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
struct hlist_head *head;
struct htab_elem *l, *next_l;
u32 hash, key_size;
- int i;
+ int i = 0;
WARN_ON_ONCE(!rcu_read_lock_held());
key_size = map->key_size;
+ if (!key)
+ goto find_first_elem;
+
hash = htab_map_hash(key, key_size);
head = select_bucket(htab, hash);
@@ -339,10 +342,8 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
/* lookup the key */
l = lookup_elem_raw(head, hash, key, key_size);
- if (!l) {
- i = 0;
+ if (!l)
goto find_first_elem;
- }
/* key was found, get next key in the same bucket */
next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index be8519148c25..a2a232dec236 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -88,6 +88,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
smap->map.key_size = attr->key_size;
smap->map.value_size = value_size;
smap->map.max_entries = attr->max_entries;
+ smap->map.map_flags = attr->map_flags;
smap->n_buckets = n_buckets;
smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 91a2d3752007..ca7e277e8b5f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -508,14 +508,18 @@ static int map_get_next_key(union bpf_attr *attr)
if (IS_ERR(map))
return PTR_ERR(map);
- err = -ENOMEM;
- key = kmalloc(map->key_size, GFP_USER);
- if (!key)
- goto err_put;
-
- err = -EFAULT;
- if (copy_from_user(key, ukey, map->key_size) != 0)
- goto free_key;
+ if (ukey) {
+ err = -ENOMEM;
+ key = kmalloc(map->key_size, GFP_USER);
+ if (!key)
+ goto err_put;
+
+ err = -EFAULT;
+ if (copy_from_user(key, ukey, map->key_size) != 0)
+ goto free_key;
+ } else {
+ key = NULL;
+ }
err = -ENOMEM;
next_key = kmalloc(map->key_size, GFP_USER);
@@ -801,7 +805,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
union bpf_attr attr = {};
int err;
- if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
+ if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (!access_ok(VERIFY_READ, uattr, 1))
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c6a4cf8ba645..fa43deb5e1a8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -63,6 +63,12 @@ struct cpuhp_cpu_state {
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
+#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
+static struct lock_class_key cpuhp_state_key;
+static struct lockdep_map cpuhp_state_lock_map =
+ STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
+#endif
+
/**
* cpuhp_step - Hotplug state machine step
* @name: Name of the step
@@ -846,6 +852,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
st->should_run = false;
+ lock_map_acquire(&cpuhp_state_lock_map);
/* Single callback invocation for [un]install ? */
if (st->single) {
if (st->cb_state < CPUHP_AP_ONLINE) {
@@ -877,6 +884,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
else if (st->state > st->target)
ret = cpuhp_ap_offline(cpu, st);
}
+ lock_map_release(&cpuhp_state_lock_map);
st->result = ret;
complete(&st->done);
}
@@ -891,6 +899,9 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
if (!cpu_online(cpu))
return 0;
+ lock_map_acquire(&cpuhp_state_lock_map);
+ lock_map_release(&cpuhp_state_lock_map);
+
/*
* If we are up and running, use the hotplug thread. For early calls
* we invoke the thread function directly.
@@ -934,6 +945,8 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
enum cpuhp_state state = st->state;
trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
+ lock_map_acquire(&cpuhp_state_lock_map);
+ lock_map_release(&cpuhp_state_lock_map);
__cpuhp_kick_ap_work(st);
wait_for_completion(&st->done);
trace_cpuhp_exit(cpu, st->state, state, st->result);
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index e9fdb5203de5..c265f1c3ae50 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -117,23 +117,20 @@ int get_callchain_buffers(int event_max_stack)
goto exit;
}
- if (count > 1) {
- /* If the allocation failed, give up */
- if (!callchain_cpus_entries)
- err = -ENOMEM;
- /*
- * If requesting per event more than the global cap,
- * return a different error to help userspace figure
- * this out.
- *
- * And also do it here so that we have &callchain_mutex held.
- */
- if (event_max_stack > sysctl_perf_event_max_stack)
- err = -EOVERFLOW;
+ /*
+ * If requesting per event more than the global cap,
+ * return a different error to help userspace figure
+ * this out.
+ *
+ * And also do it here so that we have &callchain_mutex held.
+ */
+ if (event_max_stack > sysctl_perf_event_max_stack) {
+ err = -EOVERFLOW;
goto exit;
}
- err = alloc_callchain_buffers();
+ if (count == 1)
+ err = alloc_callchain_buffers();
exit:
if (err)
atomic_dec(&nr_callchain_events);
@@ -227,12 +224,18 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
}
if (regs) {
+ mm_segment_t fs;
+
if (crosstask)
goto exit_put;
if (add_mark)
perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
+
+ fs = get_fs();
+ set_fs(USER_DS);
perf_callchain_user(&ctx, regs);
+ set_fs(fs);
}
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f74fbfe5465c..9a2fb850232e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -453,7 +453,7 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
@@ -4092,6 +4092,9 @@ static void _free_event(struct perf_event *event)
if (event->ctx)
put_ctx(event->ctx);
+ if (event->hw.target)
+ put_task_struct(event->hw.target);
+
exclusive_event_destroy(event);
module_put(event->pmu->module);
@@ -5670,9 +5673,6 @@ static void perf_output_read_one(struct perf_output_handle *handle,
__output_copy(handle, values, n * sizeof(u64));
}
-/*
- * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
- */
static void perf_output_read_group(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
@@ -5717,6 +5717,13 @@ static void perf_output_read_group(struct perf_output_handle *handle,
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
PERF_FORMAT_TOTAL_TIME_RUNNING)
+/*
+ * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
+ *
+ * The problem is that it's both hard and excessively expensive to iterate the
+ * child list, not to mention that it's impossible to IPI the children running
+ * on another CPU from interrupt/NMI context.
+ */
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{
@@ -9212,6 +9219,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
* and we cannot use the ctx information because we need the
* pmu before we get a ctx.
*/
+ get_task_struct(task);
event->hw.target = task;
}
@@ -9261,9 +9269,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
local64_set(&hwc->period_left, hwc->sample_period);
/*
- * we currently do not support PERF_FORMAT_GROUP on inherited events
+ * We currently do not support PERF_SAMPLE_READ on inherited events.
+ * See perf_output_read().
*/
- if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
+ if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
goto err_ns;
if (!has_branch_stack(event))
@@ -9291,8 +9300,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
sizeof(unsigned long),
GFP_KERNEL);
- if (!event->addr_filters_offs)
+ if (!event->addr_filters_offs) {
+ err = -ENOMEM;
goto err_per_task;
+ }
/* force hw sync on the address filters */
event->addr_filters_gen = 1;
@@ -9326,6 +9337,8 @@ err_ns:
perf_detach_cgroup(event);
if (event->ns)
put_pid_ns(event->ns);
+ if (event->hw.target)
+ put_task_struct(event->hw.target);
kfree(event);
return ERR_PTR(err);
@@ -9445,9 +9458,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
* __u16 sample size limit.
*/
if (attr->sample_stack_user >= USHRT_MAX)
- ret = -EINVAL;
+ return -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
- ret = -EINVAL;
+ return -EINVAL;
}
if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 3f8cb1e14588..253ae2da13c3 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -427,16 +427,9 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
* modify_user_hw_breakpoint - modify a user-space hardware breakpoint
* @bp: the breakpoint structure to modify
* @attr: new breakpoint attributes
- * @triggered: callback to trigger when we hit the breakpoint
- * @tsk: pointer to 'task_struct' of the process to which the address belongs
*/
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
- u64 old_addr = bp->attr.bp_addr;
- u64 old_len = bp->attr.bp_len;
- int old_type = bp->attr.bp_type;
- int err = 0;
-
/*
* modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
* will not be possible to raise IPIs that invoke __perf_event_disable.
@@ -451,27 +444,18 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
bp->attr.bp_addr = attr->bp_addr;
bp->attr.bp_type = attr->bp_type;
bp->attr.bp_len = attr->bp_len;
+ bp->attr.disabled = 1;
- if (attr->disabled)
- goto end;
-
- err = validate_hw_breakpoint(bp);
- if (!err)
- perf_event_enable(bp);
+ if (!attr->disabled) {
+ int err = validate_hw_breakpoint(bp);
- if (err) {
- bp->attr.bp_addr = old_addr;
- bp->attr.bp_type = old_type;
- bp->attr.bp_len = old_len;
- if (!bp->attr.disabled)
- perf_event_enable(bp);
+ if (err)
+ return err;
- return err;
+ perf_event_enable(bp);
+ bp->attr.disabled = 0;
}
-end:
- bp->attr.disabled = attr->disabled;
-
return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 257fa460b846..017f7933a37d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
+#include <linux/nospec.h>
#include "internal.h"
@@ -844,8 +845,10 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
return NULL;
/* AUX space */
- if (pgoff >= rb->aux_pgoff)
- return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
+ if (pgoff >= rb->aux_pgoff) {
+ int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
+ return virt_to_page(rb->aux_pages[aux_pgoff]);
+ }
}
return __perf_mmap_to_page(rb, pgoff);
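The aux_pgoff clamp added above is the usual Spectre-v1 mitigation idiom: bound the attacker-influenced index before it feeds a dependent load. A minimal generic sketch of that pattern, not taken from this patch (array_index_nospec() comes from <linux/nospec.h>):

#include <linux/nospec.h>

/* Clamp a user-controlled index before the dependent array load, so a
 * mispredicted bounds check cannot be abused to read out of bounds. */
static void *lookup_entry(void **table, unsigned long idx, unsigned long nr)
{
	if (idx >= nr)
		return NULL;
	idx = array_index_nospec(idx, nr);	/* forced to 0 under bad speculation */
	return table[idx];
}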
diff --git a/kernel/exit.c b/kernel/exit.c
index fb2ebcf3ca7c..832dff86b52a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1662,6 +1662,10 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
__WNOTHREAD|__WCLONE|__WALL))
return -EINVAL;
+ /* -INT_MIN is not defined */
+ if (upid == INT_MIN)
+ return -ESRCH;
+
if (upid == -1)
type = PIDTYPE_MAX;
else if (upid < 0) {
diff --git a/kernel/futex.c b/kernel/futex.c
index 1e4c72447456..8ab0ddd4cf8f 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1548,6 +1548,45 @@ out:
return ret;
}
+static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
+{
+ unsigned int op = (encoded_op & 0x70000000) >> 28;
+ unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
+ int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
+ int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
+ int oldval, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
+ if (oparg < 0 || oparg > 31)
+ return -EINVAL;
+ oparg = 1 << oparg;
+ }
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
+ if (ret)
+ return ret;
+
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ return oldval == cmparg;
+ case FUTEX_OP_CMP_NE:
+ return oldval != cmparg;
+ case FUTEX_OP_CMP_LT:
+ return oldval < cmparg;
+ case FUTEX_OP_CMP_GE:
+ return oldval >= cmparg;
+ case FUTEX_OP_CMP_LE:
+ return oldval <= cmparg;
+ case FUTEX_OP_CMP_GT:
+ return oldval > cmparg;
+ default:
+ return -ENOSYS;
+ }
+}
+
/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
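For context, the new futex_atomic_op_inuser() above unpacks the same word that userspace builds for FUTEX_WAKE_OP. A hedged sketch of that packing (the ENCODE_FUTEX_OP name is illustrative; the layout mirrors FUTEX_OP() from the futex UAPI):

/*
 * encoded_op layout as decoded by futex_atomic_op_inuser():
 *   bit  31      FUTEX_OP_OPARG_SHIFT modifier (oparg is a shift count)
 *   bits 28..30  op      (SET/ADD/OR/ANDN/XOR)
 *   bits 24..27  cmp     (EQ/NE/LT/LE/GT/GE)
 *   bits 12..23  oparg   (sign-extended 12-bit)
 *   bits  0..11  cmparg  (sign-extended 12-bit)
 */
#define ENCODE_FUTEX_OP(op, oparg, cmp, cmparg)			\
	((((op) & 0xf) << 28) | (((cmp) & 0xf) << 24) |		\
	 (((oparg) & 0xfff) << 12) | ((cmparg) & 0xfff))

/* e.g. ENCODE_FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0): add 1 to
 * *uaddr2 and report whether the old value was greater than 0. */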
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index e75e29e4434a..3514d955af94 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -11,6 +11,11 @@
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+ if (!__ratelimit(&ratelimit))
+ return;
+
printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
printk("->handle_irq(): %p, ", desc->handle_irq);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 5994867526f3..12e2d7cdfed0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -889,7 +889,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
* This code is triggered unconditionally. Check the affinity
* mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
*/
- if (desc->irq_common_data.affinity)
+ if (cpumask_available(desc->irq_common_data.affinity))
cpumask_copy(mask, desc->irq_common_data.affinity);
else
valid = false;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a1a07cf1101f..69485183af79 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -125,7 +125,7 @@ static void *alloc_insn_page(void)
return module_alloc(PAGE_SIZE);
}
-static void free_insn_page(void *page)
+void __weak free_insn_page(void *page)
{
module_memfree(page);
}
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 0f49abeae337..e385a6e7e228 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -640,8 +640,7 @@ static void __torture_print_stats(char *page,
{
bool fail = 0;
int i, n_stress;
- long max = 0;
- long min = statp[0].n_lock_acquired;
+ long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
long long sum = 0;
n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
@@ -748,7 +747,7 @@ static void lock_torture_cleanup(void)
* such, only perform the underlying torture-specific cleanups,
* and avoid anything related to locktorture.
*/
- if (!cxt.lwsa)
+ if (!cxt.lwsa && !cxt.lrsa)
goto end;
if (writer_tasks) {
@@ -822,6 +821,13 @@ static int __init lock_torture_init(void)
firsterr = -EINVAL;
goto unwind;
}
+
+ if (nwriters_stress == 0 && nreaders_stress == 0) {
+ pr_alert("lock-torture: must run at least one locking thread\n");
+ firsterr = -EINVAL;
+ goto unwind;
+ }
+
if (cxt.cur_ops->init)
cxt.cur_ops->init();
@@ -845,17 +851,19 @@ static int __init lock_torture_init(void)
#endif
/* Initialize the statistics so that each run gets its own numbers. */
+ if (nwriters_stress) {
+ lock_is_write_held = 0;
+ cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
+ if (cxt.lwsa == NULL) {
+ VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
- lock_is_write_held = 0;
- cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
- if (cxt.lwsa == NULL) {
- VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
- firsterr = -ENOMEM;
- goto unwind;
- }
- for (i = 0; i < cxt.nrealwriters_stress; i++) {
- cxt.lwsa[i].n_lock_fail = 0;
- cxt.lwsa[i].n_lock_acquired = 0;
+ for (i = 0; i < cxt.nrealwriters_stress; i++) {
+ cxt.lwsa[i].n_lock_fail = 0;
+ cxt.lwsa[i].n_lock_acquired = 0;
+ }
}
if (cxt.cur_ops->readlock) {
@@ -872,19 +880,21 @@ static int __init lock_torture_init(void)
cxt.nrealreaders_stress = cxt.nrealwriters_stress;
}
- lock_is_read_held = 0;
- cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
- if (cxt.lrsa == NULL) {
- VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
- firsterr = -ENOMEM;
- kfree(cxt.lwsa);
- cxt.lwsa = NULL;
- goto unwind;
- }
-
- for (i = 0; i < cxt.nrealreaders_stress; i++) {
- cxt.lrsa[i].n_lock_fail = 0;
- cxt.lrsa[i].n_lock_acquired = 0;
+ if (nreaders_stress) {
+ lock_is_read_held = 0;
+ cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
+ if (cxt.lrsa == NULL) {
+ VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
+ firsterr = -ENOMEM;
+ kfree(cxt.lwsa);
+ cxt.lwsa = NULL;
+ goto unwind;
+ }
+
+ for (i = 0; i < cxt.nrealreaders_stress; i++) {
+ cxt.lrsa[i].n_lock_fail = 0;
+ cxt.lrsa[i].n_lock_acquired = 0;
+ }
}
}
@@ -914,12 +924,14 @@ static int __init lock_torture_init(void)
goto unwind;
}
- writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
- GFP_KERNEL);
- if (writer_tasks == NULL) {
- VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
- firsterr = -ENOMEM;
- goto unwind;
+ if (nwriters_stress) {
+ writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
+ GFP_KERNEL);
+ if (writer_tasks == NULL) {
+ VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
}
if (cxt.cur_ops->readlock) {
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 426547a21a0c..f61a8c387c3e 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -194,7 +194,7 @@ void put_zone_device_page(struct page *page)
}
EXPORT_SYMBOL(put_zone_device_page);
-static void pgmap_radix_release(struct resource *res)
+static void pgmap_radix_release(struct resource *res, resource_size_t end_key)
{
resource_size_t key, align_start, align_size, align_end;
@@ -203,8 +203,11 @@ static void pgmap_radix_release(struct resource *res)
align_end = align_start + align_size - 1;
mutex_lock(&pgmap_lock);
- for (key = res->start; key <= res->end; key += SECTION_SIZE)
+ for (key = res->start; key <= res->end; key += SECTION_SIZE) {
+ if (key >= end_key)
+ break;
radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
+ }
mutex_unlock(&pgmap_lock);
}
@@ -255,7 +258,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
unlock_device_hotplug();
untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
- pgmap_radix_release(res);
+ pgmap_radix_release(res, -1);
dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
"%s: failed to free all reserved pages\n", __func__);
}
@@ -289,7 +292,7 @@ struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
void *devm_memremap_pages(struct device *dev, struct resource *res,
struct percpu_ref *ref, struct vmem_altmap *altmap)
{
- resource_size_t key, align_start, align_size, align_end;
+ resource_size_t key = 0, align_start, align_size, align_end;
pgprot_t pgprot = PAGE_KERNEL;
struct dev_pagemap *pgmap;
struct page_map *page_map;
@@ -392,7 +395,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
err_pfn_remap:
err_radix:
- pgmap_radix_release(res);
+ pgmap_radix_release(res, key);
devres_free(page_map);
return ERR_PTR(error);
}
diff --git a/kernel/module.c b/kernel/module.c
index ea2d3e3a029d..3a851cb10916 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1927,6 +1927,9 @@ static void frob_writable_data(const struct module_layout *layout,
/* livepatching wants to disable read-only so it can frob module. */
void module_disable_ro(const struct module *mod)
{
+ if (!rodata_enabled)
+ return;
+
frob_text(&mod->core_layout, set_memory_rw);
frob_rodata(&mod->core_layout, set_memory_rw);
frob_ro_after_init(&mod->core_layout, set_memory_rw);
@@ -1936,6 +1939,9 @@ void module_disable_ro(const struct module *mod)
void module_enable_ro(const struct module *mod, bool after_init)
{
+ if (!rodata_enabled)
+ return;
+
frob_text(&mod->core_layout, set_memory_ro);
frob_rodata(&mod->core_layout, set_memory_ro);
frob_text(&mod->init_layout, set_memory_ro);
@@ -1968,6 +1974,9 @@ void set_all_modules_text_rw(void)
{
struct module *mod;
+ if (!rodata_enabled)
+ return;
+
mutex_lock(&module_mutex);
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
@@ -1984,6 +1993,9 @@ void set_all_modules_text_ro(void)
{
struct module *mod;
+ if (!rodata_enabled)
+ return;
+
mutex_lock(&module_mutex);
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
@@ -1997,10 +2009,12 @@ void set_all_modules_text_ro(void)
static void disable_ro_nx(const struct module_layout *layout)
{
- frob_text(layout, set_memory_rw);
- frob_rodata(layout, set_memory_rw);
+ if (rodata_enabled) {
+ frob_text(layout, set_memory_rw);
+ frob_rodata(layout, set_memory_rw);
+ frob_ro_after_init(layout, set_memory_rw);
+ }
frob_rodata(layout, set_memory_x);
- frob_ro_after_init(layout, set_memory_rw);
frob_ro_after_init(layout, set_memory_x);
frob_writable_data(layout, set_memory_x);
}
diff --git a/kernel/pid.c b/kernel/pid.c
index 693a64385d59..fa704f88ff8e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -322,8 +322,10 @@ struct pid *alloc_pid(struct pid_namespace *ns)
}
if (unlikely(is_child_reaper(pid))) {
- if (pid_ns_prepare_proc(ns))
+ if (pid_ns_prepare_proc(ns)) {
+ disable_pid_allocation(ns);
goto out_free;
+ }
}
get_pid_ns(ns);
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
index d5760c42f042..61d41ca41844 100644
--- a/kernel/printk/braille.c
+++ b/kernel/printk/braille.c
@@ -2,12 +2,13 @@
#include <linux/kernel.h>
#include <linux/console.h>
+#include <linux/errno.h>
#include <linux/string.h>
#include "console_cmdline.h"
#include "braille.h"
-char *_braille_console_setup(char **str, char **brl_options)
+int _braille_console_setup(char **str, char **brl_options)
{
if (!strncmp(*str, "brl,", 4)) {
*brl_options = "";
@@ -15,14 +16,14 @@ char *_braille_console_setup(char **str, char **brl_options)
} else if (!strncmp(*str, "brl=", 4)) {
*brl_options = *str + 4;
*str = strchr(*brl_options, ',');
- if (!*str)
+ if (!*str) {
pr_err("need port name after brl=\n");
- else
- *((*str)++) = 0;
- } else
- return NULL;
+ return -EINVAL;
+ }
+ *((*str)++) = 0;
+ }
- return *str;
+ return 0;
}
int
diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h
index 769d771145c8..749a6756843a 100644
--- a/kernel/printk/braille.h
+++ b/kernel/printk/braille.h
@@ -9,7 +9,14 @@ braille_set_options(struct console_cmdline *c, char *brl_options)
c->brl_options = brl_options;
}
-char *
+/*
+ * Set up the console according to braille options.
+ * Return -EINVAL on syntax error, 0 on success (or if no braille option was
+ * actually given).
+ * Modifies str to point to the serial options.
+ * Sets brl_options to the parsed braille options.
+ */
+int
_braille_console_setup(char **str, char **brl_options);
int
@@ -25,10 +32,10 @@ braille_set_options(struct console_cmdline *c, char *brl_options)
{
}
-static inline char *
+static inline int
_braille_console_setup(char **str, char **brl_options)
{
- return NULL;
+ return 0;
}
static inline int
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index cf15bdb6855b..e7b86ecfda36 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2436,7 +2436,7 @@ void console_unlock(void)
}
/*
- * Console drivers are called under logbuf_lock, so
+ * Console drivers are called with interrupts disabled, so
* @console_may_schedule should be cleared before; however, we may
* end up dumping a lot of lines, for example, if called from
* console registration path, and should invoke cond_resched()
@@ -2444,11 +2444,15 @@ void console_unlock(void)
* scheduling stall on a slow console leading to RCU stall and
* softlockup warnings which exacerbate the issue with more
* messages practically incapacitating the system.
+ *
+ * console_trylock() is not able to detect the preemptive
+ * context reliably. Therefore the value must be stored before
+	 * and cleared after the "again" goto label.
*/
do_cond_resched = console_may_schedule;
+again:
console_may_schedule = 0;
-again:
/*
* We released the console_sem lock, so we need to recheck if
* cpu is online and (if not) is there at least one CON_ANYTIME
diff --git a/kernel/resource.c b/kernel/resource.c
index 9b5f04404152..7ee3dd1ad2af 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -633,7 +633,8 @@ static int __find_resource(struct resource *root, struct resource *old,
alloc.start = constraint->alignf(constraint->alignf_data, &avail,
size, constraint->align);
alloc.end = alloc.start + size - 1;
- if (resource_contains(&avail, &alloc)) {
+ if (alloc.start <= alloc.end &&
+ resource_contains(&avail, &alloc)) {
new->start = alloc.start;
new->end = alloc.end;
return 0;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4a1772c0260b..7837627368aa 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -560,7 +560,8 @@ void resched_cpu(int cpu)
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
- resched_curr(rq);
+ if (cpu_online(cpu) || cpu == smp_processor_id())
+ resched_curr(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -2326,6 +2327,7 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_period = 0;
dl_se->flags = 0;
dl_se->dl_bw = 0;
+ dl_se->dl_density = 0;
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
@@ -4246,6 +4248,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
dl_se->flags = attr->sched_flags;
dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
+ dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
/*
* Changing the parameters of a task is 'tricky' and we're not doing
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 04058b6da801..1b7fa21d2003 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -484,13 +484,84 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
}
/*
- * When a -deadline entity is queued back on the runqueue, its runtime and
- * deadline might need updating.
+ * Revised wakeup rule [1]: For self-suspending tasks, rather than
+ * re-initializing the task's runtime and deadline, the revised wakeup
+ * rule adjusts the task's runtime to prevent the task from overrunning its
+ * density.
*
- * The policy here is that we update the deadline of the entity only if:
- * - the current deadline is in the past,
- * - using the remaining runtime with the current deadline would make
- * the entity exceed its bandwidth.
+ * Reasoning: a task may overrun the density if:
+ * runtime / (deadline - t) > dl_runtime / dl_deadline
+ *
+ * Therefore, runtime can be adjusted to:
+ * runtime = (dl_runtime / dl_deadline) * (deadline - t)
+ *
+ * In this way, runtime will be equal to the maximum density
+ * the task can use without breaking any rule.
+ *
+ * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
+ * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
+ */
+static void
+update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
+{
+ u64 laxity = dl_se->deadline - rq_clock(rq);
+
+ /*
+ * If the task has deadline < period, and the deadline is in the past,
+ * it should already be throttled before this check.
+ *
+ * See update_dl_entity() comments for further details.
+ */
+ WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
+
+ dl_se->runtime = (dl_se->dl_density * laxity) >> 20;
+}
+
+/*
+ * Regarding the deadline, a task with implicit deadline has a relative
+ * deadline == relative period. A task with constrained deadline has a
+ * relative deadline <= relative period.
+ *
+ * We support constrained deadline tasks. However, there are some restrictions
+ * applied only to tasks that do not have an implicit deadline. See
+ * update_dl_entity() for more about such restrictions.
+ *
+ * dl_is_implicit() returns true if the task has an implicit deadline.
+ */
+static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_deadline == dl_se->dl_period;
+}
+
+/*
+ * When a deadline entity is placed in the runqueue, its runtime and deadline
+ * might need to be updated. This is done by a CBS wake up rule. There are two
+ * different rules: 1) the original CBS; and 2) the Revisited CBS.
+ *
+ * When the task is starting a new period, the Original CBS is used. In this
+ * case, the runtime is replenished and a new absolute deadline is set.
+ *
+ * When a task is queued before the beginning of the next period, using the
+ * remaining runtime and deadline could make the entity overflow; see
+ * dl_entity_overflow() for more about runtime overflow. When such a case
+ * is detected, the runtime and deadline need to be updated.
+ *
+ * If the task has an implicit deadline, i.e., deadline == period, the Original
+ * CBS is applied. The runtime is replenished and a new absolute deadline is
+ * set, as in the previous cases.
+ *
+ * However, the Original CBS does not work properly for tasks with
+ * deadline < period, which are said to have a constrained deadline. By
+ * applying the Original CBS, a constrained deadline task would be able to run
+ * runtime/deadline in a period. With deadline < period, the task would
+ * overrun the runtime/period allowed bandwidth, breaking the admission test.
+ *
+ * In order to prevent this misbehavior, the Revisited CBS is used for
+ * constrained deadline tasks when a runtime overflow is detected. In the
+ * Revisited CBS, rather than replenishing & setting a new absolute deadline,
+ * the remaining runtime of the task is reduced to avoid runtime overflow.
+ * Please refer to the comments of the update_dl_revised_wakeup() function for
+ * more about the Revised CBS rule.
*/
static void update_dl_entity(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se)
@@ -500,6 +571,14 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
+
+ if (unlikely(!dl_is_implicit(dl_se) &&
+ !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
+ !dl_se->dl_boosted)){
+ update_dl_revised_wakeup(dl_se, rq);
+ return;
+ }
+
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
@@ -962,11 +1041,6 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
__dequeue_dl_entity(dl_se);
}
-static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
-{
- return dl_se->dl_deadline < dl_se->dl_period;
-}
-
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -998,7 +1072,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
* If that is the case, the task will be throttled and
* the replenishment timer will be set to the next period.
*/
- if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
+ if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
dl_check_constrained_dl(&p->dl);
/*
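To make the fixed-point arithmetic of the revised wakeup rule concrete, here is a small standalone sketch with made-up numbers, assuming dl_density = (dl_runtime << 20) / dl_deadline as computed by to_ratio() in __setparam_dl() above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dl_runtime  = 10000000;	/* 10 ms reserved runtime        */
	uint64_t dl_deadline = 40000000;	/* 40 ms relative deadline       */
	uint64_t laxity      = 20000000;	/* 20 ms left until the deadline */

	/* to_ratio(): density in 20-bit fixed point, here ~0.25 << 20 */
	uint64_t dl_density = (dl_runtime << 20) / dl_deadline;

	/* update_dl_revised_wakeup(): scale the remaining runtime so that
	 * runtime / laxity never exceeds dl_runtime / dl_deadline. */
	uint64_t runtime = (dl_density * laxity) >> 20;

	printf("revised runtime = %llu ns (expected 5 ms)\n",
	       (unsigned long long)runtime);
	return 0;
}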
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c6db32c0c557..627d9e4c957b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2429,7 +2429,8 @@ void task_numa_work(struct callback_head *work)
return;
- down_read(&mm->mmap_sem);
+ if (!down_read_trylock(&mm->mmap_sem))
+ return;
vma = find_vma(mm, start);
if (!vma) {
reset_ptenuma_scan(p);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 798114f4851d..12e8970f1146 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2207,7 +2207,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
queue_push_tasks(rq);
#endif /* CONFIG_SMP */
- if (p->prio < rq->curr->prio)
+ if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
resched_curr(rq);
}
}
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index bb0bf0651b99..5efa37d6e8d2 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1196,7 +1196,12 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
cpu_base = raw_cpu_ptr(&hrtimer_bases);
- if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
+ /*
+ * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
+	 * clock modifications, so they need to become CLOCK_MONOTONIC to
+ * ensure POSIX compliance.
+ */
+ if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
clock_id = CLOCK_MONOTONIC;
base = hrtimer_clockid_to_base(clock_id);
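From the userspace side, the rule being enforced is that a timer armed with a relative timeout on CLOCK_REALTIME must not move when the wall clock is stepped. A hedged illustration using timerfd (not part of this patch):

#include <sys/timerfd.h>
#include <time.h>

int arm_relative_5s(void)
{
	/* Relative CLOCK_REALTIME timeout: POSIX says clock_settime() must
	 * not disturb it, which is why the kernel maps it to CLOCK_MONOTONIC
	 * internally. */
	struct itimerspec its = { .it_value = { .tv_sec = 5 } };
	int fd = timerfd_create(CLOCK_REALTIME, 0);

	if (fd < 0)
		return -1;
	return timerfd_settime(fd, 0, &its, NULL) ? -1 : fd;
}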
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 9cff0ab82b63..e24008c098c6 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -300,14 +300,17 @@ out:
static int pc_clock_gettime(clockid_t id, struct timespec *ts)
{
struct posix_clock_desc cd;
+ struct timespec64 ts64;
int err;
err = get_clock_desc(id, &cd);
if (err)
return err;
- if (cd.clk->ops.clock_gettime)
- err = cd.clk->ops.clock_gettime(cd.clk, ts);
+ if (cd.clk->ops.clock_gettime) {
+ err = cd.clk->ops.clock_gettime(cd.clk, &ts64);
+ *ts = timespec64_to_timespec(ts64);
+ }
else
err = -EOPNOTSUPP;
@@ -319,14 +322,17 @@ static int pc_clock_gettime(clockid_t id, struct timespec *ts)
static int pc_clock_getres(clockid_t id, struct timespec *ts)
{
struct posix_clock_desc cd;
+ struct timespec64 ts64;
int err;
err = get_clock_desc(id, &cd);
if (err)
return err;
- if (cd.clk->ops.clock_getres)
- err = cd.clk->ops.clock_getres(cd.clk, ts);
+ if (cd.clk->ops.clock_getres) {
+ err = cd.clk->ops.clock_getres(cd.clk, &ts64);
+ *ts = timespec64_to_timespec(ts64);
+ }
else
err = -EOPNOTSUPP;
@@ -337,6 +343,7 @@ static int pc_clock_getres(clockid_t id, struct timespec *ts)
static int pc_clock_settime(clockid_t id, const struct timespec *ts)
{
+ struct timespec64 ts64 = timespec_to_timespec64(*ts);
struct posix_clock_desc cd;
int err;
@@ -350,7 +357,7 @@ static int pc_clock_settime(clockid_t id, const struct timespec *ts)
}
if (cd.clk->ops.clock_settime)
- err = cd.clk->ops.clock_settime(cd.clk, ts);
+ err = cd.clk->ops.clock_settime(cd.clk, &ts64);
else
err = -EOPNOTSUPP;
out:
@@ -403,29 +410,36 @@ static void pc_timer_gettime(struct k_itimer *kit, struct itimerspec *ts)
{
clockid_t id = kit->it_clock;
struct posix_clock_desc cd;
+ struct itimerspec64 ts64;
if (get_clock_desc(id, &cd))
return;
- if (cd.clk->ops.timer_gettime)
- cd.clk->ops.timer_gettime(cd.clk, kit, ts);
-
+ if (cd.clk->ops.timer_gettime) {
+ cd.clk->ops.timer_gettime(cd.clk, kit, &ts64);
+ *ts = itimerspec64_to_itimerspec(&ts64);
+ }
put_clock_desc(&cd);
}
static int pc_timer_settime(struct k_itimer *kit, int flags,
struct itimerspec *ts, struct itimerspec *old)
{
+ struct itimerspec64 ts64 = itimerspec_to_itimerspec64(ts);
clockid_t id = kit->it_clock;
struct posix_clock_desc cd;
+ struct itimerspec64 old64;
int err;
err = get_clock_desc(id, &cd);
if (err)
return err;
- if (cd.clk->ops.timer_settime)
- err = cd.clk->ops.timer_settime(cd.clk, kit, flags, ts, old);
+ if (cd.clk->ops.timer_settime) {
+ err = cd.clk->ops.timer_settime(cd.clk, kit, flags, &ts64, &old64);
+ if (old)
+ *old = itimerspec64_to_itimerspec(&old64);
+ }
else
err = -EOPNOTSUPP;
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a26036d37a38..382b159d8592 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -205,6 +205,11 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
update_clock_read_data(&rd);
+ if (sched_clock_timer.function != NULL) {
+ /* update timeout for clock wrap */
+ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
+ }
+
r = rate;
if (r >= 4000000) {
r /= 1000000;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index ec0169f49c39..00f3db2c6cca 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1919,6 +1919,12 @@ int timers_dead_cpu(unsigned int cpu)
raw_spin_lock_irq(&new_base->lock);
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ /*
+	 * The current CPU's base clock might be stale. Update it
+ * before moving the timers over.
+ */
+ forward_timer_base(new_base);
+
BUG_ON(old_base->running_timer);
for (i = 0; i < WHEEL_SIZE; i++)
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index ba7d8b288bb3..ef4f16e81283 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -16,6 +16,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
+#include <linux/nmi.h>
#include <asm/uaccess.h>
@@ -96,6 +97,9 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
next_one:
i = 0;
+
+ touch_nmi_watchdog();
+
raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
curr = timerqueue_getnext(&base->active);
@@ -207,6 +211,8 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
{
struct clock_event_device *dev = td->evtdev;
+ touch_nmi_watchdog();
+
SEQ_printf(m, "Tick Device: mode: %d\n", td->mode);
if (cpu < 0)
SEQ_printf(m, "Broadcast device\n");
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0193f58c45f0..e35a411bea4b 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -322,6 +322,9 @@ static int regex_match_full(char *str, struct regex *r, int len)
static int regex_match_front(char *str, struct regex *r, int len)
{
+ if (len < r->len)
+ return 0;
+
if (strncmp(str, r->pattern, r->len) == 0)
return 1;
return 0;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 5ff45cae4ac4..ea3ed03fed7e 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -607,7 +607,7 @@ static int create_trace_kprobe(int argc, char **argv)
bool is_return = false, is_delete = false;
char *symbol = NULL, *event = NULL, *group = NULL;
char *arg;
- unsigned long offset = 0;
+ long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];
@@ -675,7 +675,7 @@ static int create_trace_kprobe(int argc, char **argv)
symbol = argv[1];
/* TODO: support .init module functions */
ret = traceprobe_split_symbol_offset(symbol, &offset);
- if (ret) {
+ if (ret || offset < 0 || offset > UINT_MAX) {
pr_info("Failed to parse either an address or a symbol.\n");
return ret;
}
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 8c0553d9afd3..5ea191b917e9 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -319,7 +319,7 @@ static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
}
/* Split symbol and offset. */
-int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
+int traceprobe_split_symbol_offset(char *symbol, long *offset)
{
char *tmp;
int ret;
@@ -327,13 +327,11 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
if (!offset)
return -EINVAL;
- tmp = strchr(symbol, '+');
+ tmp = strpbrk(symbol, "+-");
if (tmp) {
- /* skip sign because kstrtoul doesn't accept '+' */
- ret = kstrtoul(tmp + 1, 0, offset);
+ ret = kstrtol(tmp, 0, offset);
if (ret)
return ret;
-
*tmp = '\0';
} else
*offset = 0;
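Switching from strchr()/kstrtoul() to strpbrk()/kstrtol() is what allows a probe symbol to carry a signed offset such as "func-0x8". A userspace approximation of the new parsing, for illustration only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "symbol+0x10" or "symbol-0x10" into name and signed offset,
 * mirroring traceprobe_split_symbol_offset() after this change. */
static void split_symbol_offset(char *symbol, long *offset)
{
	char *tmp = strpbrk(symbol, "+-");

	if (tmp) {
		*offset = strtol(tmp, NULL, 0);	/* sign is part of the number */
		*tmp = '\0';
	} else {
		*offset = 0;
	}
}

int main(void)
{
	char sym[] = "vfs_read-0x8";
	long off;

	split_symbol_offset(sym, &off);
	printf("%s %ld\n", sym, off);	/* prints "vfs_read -8" */
	return 0;
}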
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 0c0ae54d44c6..2b84c0de92c7 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -354,7 +354,7 @@ extern int traceprobe_conflict_field_name(const char *name,
extern void traceprobe_update_arg(struct probe_arg *arg);
extern void traceprobe_free_probe_arg(struct probe_arg *arg);
-extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
+extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
extern ssize_t traceprobe_probes_write(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 0913693caf6e..788262984818 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -149,6 +149,8 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
return;
ret = strncpy_from_user(dst, src, maxlen);
+ if (ret == maxlen)
+ dst[--ret] = '\0';
if (ret < 0) { /* Failed to fetch string */
((u8 *)get_rloc_data(dest))[0] = '\0';
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d0639d917899..c8e7cc0e6ff6 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -202,7 +202,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
lockdep_is_held(&tracepoints_mutex));
old = func_add(&tp_funcs, func, prio);
if (IS_ERR(old)) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
return PTR_ERR(old);
}
@@ -235,7 +235,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
lockdep_is_held(&tracepoints_mutex));
old = func_remove(&tp_funcs, func);
if (IS_ERR(old)) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
return PTR_ERR(old);
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0750110471e4..73203f649ad4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4169,6 +4169,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
/**
+ * current_work - retrieve %current task's work struct
+ *
+ * Determine if %current task is a workqueue worker and what it's working on.
+ * Useful to find out the context that the %current task is running in.
+ *
+ * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
+ */
+struct work_struct *current_work(void)
+{
+ struct worker *worker = current_wq_worker();
+
+ return worker ? worker->current_work : NULL;
+}
+EXPORT_SYMBOL(current_work);
+
+/**
* current_is_workqueue_rescuer - is %current workqueue rescuer?
*
* Determine whether %current is a workqueue rescuer. Can be used from
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a..5323b59ca393 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -83,7 +83,8 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
- IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
+ IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+ pmd_free_pte_page(pmd)) {
if (pmd_set_huge(pmd, phys_addr + addr, prot))
continue;
}
@@ -109,7 +110,8 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
- IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
+ IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+ pud_free_pmd_page(pud)) {
if (pud_set_huge(pud, phys_addr + addr, prot))
continue;
}
diff --git a/lib/kobject.c b/lib/kobject.c
index 445dcaeb0f56..b733a83e5294 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -234,14 +234,12 @@ static int kobject_add_internal(struct kobject *kobj)
/* be noisy on error issues */
if (error == -EEXIST)
- WARN(1, "%s failed for %s with "
- "-EEXIST, don't try to register things with "
- "the same name in the same directory.\n",
- __func__, kobject_name(kobj));
+ pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+ __func__, kobject_name(kobj));
else
- WARN(1, "%s failed for %s (error: %d parent: %s)\n",
- __func__, kobject_name(kobj), error,
- parent ? kobject_name(parent) : "'none'");
+ pr_err("%s failed for %s (error: %d parent: %s)\n",
+ __func__, kobject_name(kobj), error,
+ parent ? kobject_name(parent) : "'none'");
} else
kobj->state_in_sysfs = 1;
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 93336502af08..0f64fcee4ccd 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -671,7 +671,23 @@ do { \
************** MIPS/64 **************
***************************************/
#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+/*
+ * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
+ * code below, so we special case MIPS64r6 until the compiler can do better.
+ */
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ __asm__ ("dmulu %0,%1,%2" \
+ : "=d" ((UDItype)(w0)) \
+ : "d" ((UDItype)(u)), \
+ "d" ((UDItype)(v))); \
+ __asm__ ("dmuhu %0,%1,%2" \
+ : "=d" ((UDItype)(w1)) \
+ : "d" ((UDItype)(u)), \
+ "d" ((UDItype)(v))); \
+} while (0)
+#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
#define umul_ppmm(w1, w0, u, v) \
do { \
typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 32d0ad058380..895961c53385 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -448,8 +448,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
if (!key ||
(ht->p.obj_cmpfn ?
ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }
if (!ht->rhlist)
return rht_obj(ht, head);
diff --git a/mm/filemap.c b/mm/filemap.c
index a8d2c7a73d54..113c2248c476 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -621,7 +621,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
VM_BUG_ON_PAGE(!PageLocked(new), new);
VM_BUG_ON_PAGE(new->mapping, new);
- error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+ error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
if (!error) {
struct address_space *mapping = old->mapping;
void (*freepage)(struct page *);
@@ -677,7 +677,7 @@ static int __add_to_page_cache_locked(struct page *page,
return error;
}
- error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
+ error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
if (error) {
if (!huge)
mem_cgroup_cancel_charge(page, memcg, false);
@@ -1252,8 +1252,7 @@ no_page:
if (fgp_flags & FGP_ACCESSED)
__SetPageReferenced(page);
- err = add_to_page_cache_lru(page, mapping, offset,
- gfp_mask & GFP_RECLAIM_MASK);
+ err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
if (unlikely(err)) {
put_page(page);
page = NULL;
@@ -2001,7 +2000,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
if (!page)
return -ENOMEM;
- ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
+ ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
if (ret == 0)
ret = mapping->a_ops->readpage(file, page);
else if (ret == -EEXIST)
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index db77dcb38afd..375a103d7a56 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -52,6 +52,18 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
ret = -EFAULT;
goto out;
}
+
+ /*
+ * While get_vaddr_frames() could be used for transient (kernel
+ * controlled lifetime) pinning of memory pages all current
+ * users establish long term (userspace controlled lifetime)
+ * page pinning. Treat get_vaddr_frames() like
+ * get_user_pages_longterm() and disallow it for filesystem-dax
+ * mappings.
+ */
+ if (vma_is_fsdax(vma))
+ return -EOPNOTSUPP;
+
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
vec->got_ref = true;
vec->is_pfns = false;
diff --git a/mm/gup.c b/mm/gup.c
index c63a0341ae38..be4ccddac26f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -430,6 +430,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if (vm_flags & (VM_IO | VM_PFNMAP))
return -EFAULT;
+ if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+ return -EFAULT;
+
if (write) {
if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE))
@@ -982,6 +985,70 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
}
EXPORT_SYMBOL(get_user_pages);
+#ifdef CONFIG_FS_DAX
+/*
+ * This is the same as get_user_pages() in that it assumes we are
+ * operating on the current task's mm, but it goes further to validate
+ * that the vmas associated with the address range are suitable for
+ * longterm elevated page reference counts. For example, filesystem-dax
+ * mappings are subject to the lifetime enforced by the filesystem and
+ * we need guarantees that longterm users like RDMA and V4L2 only
+ * establish mappings that have a kernel enforced revocation mechanism.
+ *
+ * "longterm" == userspace controlled elevated page count lifetime.
+ * Contrast this to iov_iter_get_pages() usages which are transient.
+ */
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas_arg)
+{
+ struct vm_area_struct **vmas = vmas_arg;
+ struct vm_area_struct *vma_prev = NULL;
+ long rc, i;
+
+ if (!pages)
+ return -EINVAL;
+
+ if (!vmas) {
+ vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
+ GFP_KERNEL);
+ if (!vmas)
+ return -ENOMEM;
+ }
+
+ rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+
+ for (i = 0; i < rc; i++) {
+ struct vm_area_struct *vma = vmas[i];
+
+ if (vma == vma_prev)
+ continue;
+
+ vma_prev = vma;
+
+ if (vma_is_fsdax(vma))
+ break;
+ }
+
+ /*
+ * Either get_user_pages() failed, or the vma validation
+ * succeeded, in either case we don't need to put_page() before
+ * returning.
+ */
+ if (i >= rc)
+ goto out;
+
+ for (i = 0; i < rc; i++)
+ put_page(pages[i]);
+ rc = -EOPNOTSUPP;
+out:
+ if (vmas != vmas_arg)
+ kfree(vmas);
+ return rc;
+}
+EXPORT_SYMBOL(get_user_pages_longterm);
+#endif /* CONFIG_FS_DAX */
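A hedged sketch of how a driver would be expected to call the new helper; the function and variable names here are hypothetical, and mmap_sem must be held around get_user_pages() in this kernel:

static long pin_user_buffer(unsigned long uaddr, long npages,
			    struct page **pages)
{
	long got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages_longterm(uaddr, npages, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (got < 0)
		return got;		/* fs-dax VMAs fail with -EOPNOTSUPP */
	if (got != npages) {
		while (got)
			put_page(pages[--got]);
		return -EFAULT;
	}
	return npages;
}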
+
/**
* populate_vma_page_range() - populate a range of pages in the vma.
* @vma: target vma
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c234c078693c..e2982ea26090 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2279,11 +2279,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
list_for_each_safe(pos, next, &list) {
page = list_entry((void *)pos, struct page, mapping);
- lock_page(page);
+ if (!trylock_page(page))
+ goto next;
/* split_huge_page() removes page from list on success */
if (!split_huge_page(page))
split++;
unlock_page(page);
+next:
put_page(page);
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 5d7c006373d3..898eb26f5dc8 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -528,7 +528,12 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
goto out;
}
- VM_BUG_ON_PAGE(PageCompound(page), page);
+ /* TODO: teach khugepaged to collapse THP mapped with pte */
+ if (PageCompound(page)) {
+ result = SCAN_PAGE_COMPOUND;
+ goto out;
+ }
+
VM_BUG_ON_PAGE(!PageAnon(page), page);
VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5aa71a82ca73..851efb004857 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -921,6 +921,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
int ret;
int kill = 1, forcekill;
struct page *hpage = *hpagep;
+ bool mlocked = PageMlocked(hpage);
/*
* Here we are interested only in user-mapped pages, so skip any
@@ -985,6 +986,13 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
pfn, page_mapcount(hpage));
/*
+ * try_to_unmap() might put mlocked page in lru cache, so call
+ * shake_page() again to ensure that it's flushed.
+ */
+ if (mlocked)
+ shake_page(hpage, 0);
+
+ /*
* Now that the dirty bit has been propagated to the
* struct page and all unmaps done we can decide if
* killing is needed or not. Only kill when the page
diff --git a/mm/memory.c b/mm/memory.c
index e2e68767a373..d2db2c4eb0a4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2848,6 +2848,17 @@ static int __do_fault(struct fault_env *fe, pgoff_t pgoff,
return ret;
}
+/*
+ * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
+ * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
+ * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
+ * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
+ */
+static int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+ return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
static int pte_alloc_one_map(struct fault_env *fe)
{
struct vm_area_struct *vma = fe->vma;
@@ -2871,18 +2882,27 @@ static int pte_alloc_one_map(struct fault_env *fe)
map_pte:
/*
* If a huge pmd materialized under us just retry later. Use
- * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
- * didn't become pmd_trans_huge under us and then back to pmd_none, as
- * a result of MADV_DONTNEED running immediately after a huge pmd fault
- * in a different thread of this mm, in turn leading to a misleading
- * pmd_trans_huge() retval. All we have to ensure is that it is a
- * regular pmd that we can walk with pte_offset_map() and we can do that
- * through an atomic read in C, which is what pmd_trans_unstable()
- * provides.
+ * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
+ * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
+ * under us and then back to pmd_none, as a result of MADV_DONTNEED
+ * running immediately after a huge pmd fault in a different thread of
+ * this mm, in turn leading to a misleading pmd_trans_huge() retval.
+ * All we have to ensure is that it is a regular pmd that we can walk
+ * with pte_offset_map() and we can do that through an atomic read in
+ * C, which is what pmd_trans_unstable() provides.
*/
- if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd))
+ if (pmd_devmap_trans_unstable(fe->pmd))
return VM_FAULT_NOPAGE;
+ /*
+ * At this point we know that our vmf->pmd points to a page of ptes
+ * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
+ * for the duration of the fault. If a racing MADV_DONTNEED runs and
+ * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
+ * be valid and we will re-check to make sure the vmf->pte isn't
+ * pte_none() under vmf->ptl protection when we return to
+ * alloc_set_pte().
+ */
fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
&fe->ptl);
return 0;
@@ -3456,7 +3476,7 @@ static int handle_pte_fault(struct fault_env *fe)
fe->pte = NULL;
} else {
/* See comment in pte_alloc_one_map() */
- if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd))
+ if (pmd_devmap_trans_unstable(fe->pmd))
return 0;
/*
* A regular pmd is established and it can't morph into a huge
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 439cc63ad903..807236aed275 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2506,13 +2506,13 @@ void account_page_redirty(struct page *page)
if (mapping && mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
- bool locked;
+ struct wb_lock_cookie cookie = {};
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
current->nr_dirtied--;
dec_node_page_state(page, NR_DIRTIED);
dec_wb_stat(wb, WB_DIRTIED);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
}
}
EXPORT_SYMBOL(account_page_redirty);
@@ -2618,15 +2618,15 @@ void cancel_dirty_page(struct page *page)
if (mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
- bool locked;
+ struct wb_lock_cookie cookie = {};
lock_page_memcg(page);
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
if (TestClearPageDirty(page))
account_page_cleaned(page, mapping, wb);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
unlock_page_memcg(page);
} else {
ClearPageDirty(page);
@@ -2658,7 +2658,7 @@ int clear_page_dirty_for_io(struct page *page)
if (mapping && mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
- bool locked;
+ struct wb_lock_cookie cookie = {};
/*
* Yes, Virginia, this is indeed insane.
@@ -2695,7 +2695,7 @@ int clear_page_dirty_for_io(struct page *page)
* always locked coming in here, so we get the desired
* exclusion.
*/
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
if (TestClearPageDirty(page)) {
mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
dec_node_page_state(page, NR_FILE_DIRTY);
@@ -2703,7 +2703,7 @@ int clear_page_dirty_for_io(struct page *page)
dec_wb_stat(wb, WB_RECLAIMABLE);
ret = 1;
}
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
return ret;
}
return TestClearPageDirty(page);
diff --git a/mm/percpu.c b/mm/percpu.c
index 4e739fcf91bf..88b57ee35ba3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -70,6 +70,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
diff --git a/mm/shmem.c b/mm/shmem.c
index 2123bfc39ef2..42ca5df2c0e3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -466,36 +466,45 @@ next:
info = list_entry(pos, struct shmem_inode_info, shrinklist);
inode = &info->vfs_inode;
- if (nr_to_split && split >= nr_to_split) {
- iput(inode);
- continue;
- }
+ if (nr_to_split && split >= nr_to_split)
+ goto leave;
- page = find_lock_page(inode->i_mapping,
+ page = find_get_page(inode->i_mapping,
(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
if (!page)
goto drop;
+ /* No huge page at the end of the file: nothing to split */
if (!PageTransHuge(page)) {
- unlock_page(page);
put_page(page);
goto drop;
}
+ /*
+ * Leave the inode on the list if we failed to lock
+ * the page at this time.
+ *
+ * Waiting for the lock may lead to deadlock in the
+ * reclaim path.
+ */
+ if (!trylock_page(page)) {
+ put_page(page);
+ goto leave;
+ }
+
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
- if (ret) {
- /* split failed: leave it on the list */
- iput(inode);
- continue;
- }
+ /* If split failed leave the inode on the list */
+ if (ret)
+ goto leave;
split++;
drop:
list_del_init(&info->shrinklist);
removed++;
+leave:
iput(inode);
}
diff --git a/mm/slab.c b/mm/slab.c
index 1f82d16a0518..c59844dbd034 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4096,7 +4096,8 @@ next:
next_reap_node();
out:
/* Set up the next iteration */
- schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
+ schedule_delayed_work_on(smp_processor_id(), work,
+ round_jiffies_relative(REAPTIMEOUT_AC));
}
#ifdef CONFIG_SLABINFO
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4365de74bdb0..557ad1367595 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2841,8 +2841,10 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
for (i = 0; i <= ZONE_NORMAL; i++) {
zone = &pgdat->node_zones[i];
- if (!managed_zone(zone) ||
- pgdat_reclaimable_pages(pgdat) == 0)
+ if (!managed_zone(zone))
+ continue;
+
+ if (!zone_reclaimable_pages(zone))
continue;
pfmemalloc_reserve += min_wmark_pages(zone);
@@ -2964,7 +2966,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
unsigned long nr_reclaimed;
struct scan_control sc = {
.nr_to_reclaim = SWAP_CLUSTER_MAX,
- .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
+ .gfp_mask = memalloc_noio_flags(gfp_mask),
.reclaim_idx = gfp_zone(gfp_mask),
.order = order,
.nodemask = nodemask,
@@ -2979,12 +2981,12 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
* 1 is returned so that the page allocator does not OOM kill at this
* point.
*/
- if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
+ if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
return 1;
trace_mm_vmscan_direct_reclaim_begin(order,
sc.may_writepage,
- gfp_mask,
+ sc.gfp_mask,
sc.reclaim_idx);
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
@@ -3747,16 +3749,15 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
const unsigned long nr_pages = 1 << order;
struct task_struct *p = current;
struct reclaim_state reclaim_state;
- int classzone_idx = gfp_zone(gfp_mask);
struct scan_control sc = {
.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
- .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
+ .gfp_mask = memalloc_noio_flags(gfp_mask),
.order = order,
.priority = NODE_RECLAIM_PRIORITY,
.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
.may_swap = 1,
- .reclaim_idx = classzone_idx,
+ .reclaim_idx = gfp_zone(gfp_mask),
};
cond_resched();
@@ -3766,7 +3767,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
* and RECLAIM_UNMAP.
*/
p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
- lockdep_set_current_reclaim_state(gfp_mask);
+ lockdep_set_current_reclaim_state(sc.gfp_mask);
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c3585f345c22..00f9620e21ff 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1363,8 +1363,6 @@ static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
return zone == compare;
}
- /* The zone must be somewhere! */
- WARN_ON_ONCE(1);
return false;
}
@@ -1399,18 +1397,24 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
zone->present_pages,
zone->managed_pages);
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- seq_printf(m, "\n %-12s %lu", vmstat_text[i],
- zone_page_state(zone, i));
-
seq_printf(m,
"\n protection: (%ld",
zone->lowmem_reserve[0]);
for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
- seq_printf(m,
- ")"
- "\n pagesets");
+ seq_putc(m, ')');
+
+ /* If unpopulated, no other information is useful */
+ if (!populated_zone(zone)) {
+ seq_putc(m, '\n');
+ return;
+ }
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ seq_printf(m, "\n %-12s %lu", vmstat_text[i],
+ zone_page_state(zone, i));
+
+ seq_printf(m, "\n pagesets");
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index fbfacd51aa34..fb3e2a50d76e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -29,6 +29,7 @@
#include <linux/net_tstamp.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/phy.h>
#include <net/arp.h>
#include <net/switchdev.h>
@@ -562,8 +563,7 @@ static int vlan_dev_init(struct net_device *dev)
NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
NETIF_F_ALL_FCOE;
- dev->features |= real_dev->vlan_features | NETIF_F_LLTX |
- NETIF_F_GSO_SOFTWARE;
+ dev->features |= dev->hw_features | NETIF_F_LLTX;
dev->gso_max_size = real_dev->gso_max_size;
dev->gso_max_segs = real_dev->gso_max_segs;
if (dev->features & NETIF_F_VLAN_FEATURES)
@@ -659,8 +659,11 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev,
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
+ struct phy_device *phydev = vlan->real_dev->phydev;
- if (ops->get_ts_info) {
+ if (phydev && phydev->drv && phydev->drv->ts_info) {
+ return phydev->drv->ts_info(phydev, info);
+ } else if (ops->get_ts_info) {
return ops->get_ts_info(vlan->real_dev, info);
} else {
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5d2693826afb..1e84c5226c84 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
#include <linux/module.h>
#include <linux/init.h>
+/* Hardening for Spectre-v1 */
+#include <linux/nospec.h>
+
#include "lec.h"
#include "lec_arpc.h"
#include "resources.h"
@@ -697,8 +700,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
if (bytes_left != 0)
pr_info("copy from user failed for %d bytes\n", bytes_left);
- if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
- !dev_lec[ioc_data.dev_num])
+ if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
+ return -EINVAL;
+ ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
+ if (!dev_lec[ioc_data.dev_num])
return -EINVAL;
vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
if (!vpriv)
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e7f690b571ea..5419b1214abd 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1964,10 +1964,22 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
/* if yes, the client has roamed and we have
* to unclaim it.
*/
- batadv_handle_unclaim(bat_priv, primary_if,
- primary_if->net_dev->dev_addr,
- ethhdr->h_source, vid);
- goto allow;
+ if (batadv_has_timed_out(claim->lasttime, 100)) {
+ /* only unclaim if the last claim entry is
+ * older than 100 ms to make sure we really
+ * have a roaming client here.
+ */
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Roaming client %pM detected. Unclaim it.\n",
+ ethhdr->h_source);
+ batadv_handle_unclaim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
+ goto allow;
+ } else {
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Race for claim %pM detected. Drop packet.\n",
+ ethhdr->h_source);
+ goto handled;
+ }
}
/* check if it is a multicast/broadcast frame */
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 1904a93f47d5..de7b82ece499 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -755,7 +755,8 @@ static void set_ip_addr_bits(u8 addr_type, u8 *addr)
}
static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
- struct lowpan_btle_dev *dev)
+ struct lowpan_btle_dev *dev,
+ bool new_netdev)
{
struct lowpan_peer *peer;
@@ -786,7 +787,8 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
spin_unlock(&devices_lock);
/* Notifying peers about us needs to be done without locks held */
- INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
+ if (new_netdev)
+ INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));
return peer->chan;
@@ -843,6 +845,7 @@ out:
static inline void chan_ready_cb(struct l2cap_chan *chan)
{
struct lowpan_btle_dev *dev;
+ bool new_netdev = false;
dev = lookup_dev(chan->conn);
@@ -853,12 +856,13 @@ static inline void chan_ready_cb(struct l2cap_chan *chan)
l2cap_chan_del(chan, -ENOENT);
return;
}
+ new_netdev = true;
}
if (!try_module_get(THIS_MODULE))
return;
- add_peer_chan(chan, dev);
+ add_peer_chan(chan, dev, new_netdev);
ifup(dev->netdev);
}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 1aff2da9bc74..5d3698170004 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -163,6 +163,9 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk)
}
EXPORT_SYMBOL(bt_accept_enqueue);
+/* Calling function must hold the sk lock.
+ * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list.
+ */
void bt_accept_unlink(struct sock *sk)
{
BT_DBG("sk %p state %d", sk, sk->sk_state);
@@ -181,11 +184,32 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
BT_DBG("parent %p", parent);
+restart:
list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
sk = (struct sock *)s;
+ /* Prevent early freeing of sk due to unlink and sock_kill */
+ sock_hold(sk);
lock_sock(sk);
+ /* Check sk has not already been unlinked via
+ * bt_accept_unlink() due to serialisation caused by sk locking
+ */
+ if (!bt_sk(sk)->parent) {
+ BT_DBG("sk %p, already unlinked", sk);
+ release_sock(sk);
+ sock_put(sk);
+
+ /* Restart the loop as sk is no longer in the list
+ * and also avoid a potential infinite loop because
+ * list_for_each_entry_safe() is not thread safe.
+ */
+ goto restart;
+ }
+
+ /* sk is safely in the parent list so reduce reference count */
+ sock_put(sk);
+
/* FIXME: Is this check still needed */
if (sk->sk_state == BT_CLOSED) {
bt_accept_unlink(sk);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index dc59eae54717..cc061495f653 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -749,18 +749,31 @@ static bool conn_use_rpa(struct hci_conn *conn)
}
static void hci_req_add_le_create_conn(struct hci_request *req,
- struct hci_conn *conn)
+ struct hci_conn *conn,
+ bdaddr_t *direct_rpa)
{
struct hci_cp_le_create_conn cp;
struct hci_dev *hdev = conn->hdev;
u8 own_addr_type;
- /* Update random address, but set require_privacy to false so
- * that we never connect with an non-resolvable address.
+ /* If direct address was provided we use it instead of current
+ * address.
*/
- if (hci_update_random_address(req, false, conn_use_rpa(conn),
- &own_addr_type))
- return;
+ if (direct_rpa) {
+ if (bacmp(&req->hdev->random_addr, direct_rpa))
+ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+ direct_rpa);
+
+ /* direct address is always RPA */
+ own_addr_type = ADDR_LE_DEV_RANDOM;
+ } else {
+ /* Update random address, but set require_privacy to false so
+ * that we never connect with an non-resolvable address.
+ */
+ if (hci_update_random_address(req, false, conn_use_rpa(conn),
+ &own_addr_type))
+ return;
+ }
memset(&cp, 0, sizeof(cp));
@@ -825,7 +838,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role)
+ u8 role, bdaddr_t *direct_rpa)
{
struct hci_conn_params *params;
struct hci_conn *conn;
@@ -940,7 +953,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
}
- hci_req_add_le_create_conn(&req, conn);
+ hci_req_add_le_create_conn(&req, conn, direct_rpa);
create_conn:
err = hci_req_run(&req, create_le_conn_complete);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 3ac89e9ace71..4bd72d2fe415 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -548,6 +548,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ bool changed = false;
/* If Connectionless Slave Broadcast master role is supported
* enable all necessary events for it.
@@ -557,6 +558,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
events[1] |= 0x80; /* Synchronization Train Complete */
events[2] |= 0x10; /* Slave Page Response Timeout */
events[2] |= 0x20; /* CSB Channel Map Change */
+ changed = true;
}
/* If Connectionless Slave Broadcast slave role is supported
@@ -567,13 +569,24 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
events[2] |= 0x02; /* CSB Receive */
events[2] |= 0x04; /* CSB Timeout */
events[2] |= 0x08; /* Truncated Page Complete */
+ changed = true;
}
/* Enable Authenticated Payload Timeout Expired event if supported */
- if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
+ if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
events[2] |= 0x80;
+ changed = true;
+ }
- hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
+ /* Some Broadcom based controllers indicate support for Set Event
+ * Mask Page 2 command, but then actually do not support it. Since
+ * the default value is all bits set to zero, the command is only
+ * required if the event mask has to be changed. In case no change
+ * to the event mask is needed, skip this command.
+ */
+ if (changed)
+ hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
+ sizeof(events), events);
}
static int hci_init3_req(struct hci_request *req, unsigned long opt)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index e17aacbc5630..d2f9eb169ba8 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -4646,7 +4646,8 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
bdaddr_t *addr,
- u8 addr_type, u8 adv_type)
+ u8 addr_type, u8 adv_type,
+ bdaddr_t *direct_rpa)
{
struct hci_conn *conn;
struct hci_conn_params *params;
@@ -4697,7 +4698,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
}
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
- HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
+ HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
+ direct_rpa);
if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
* by higher layer that tried to connect, if no then
@@ -4807,8 +4809,13 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
bdaddr_type = irk->addr_type;
}
- /* Check if we have been requested to connect to this device */
- conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
+ /* Check if we have been requested to connect to this device.
+ *
+ * direct_addr is set only for directed advertising reports (it is NULL
+ * for advertising reports) and is already verified to be RPA above.
+ */
+ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
+ direct_addr);
if (conn && type == LE_ADV_IND) {
/* Store report for later inclusion by
* mgmt_device_connected
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2bbca23a9d05..1fc23cb4a3e0 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7148,7 +7148,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
hcon = hci_connect_le(hdev, dst, dst_type,
chan->sec_level,
HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_SLAVE);
+ HCI_ROLE_SLAVE, NULL);
else
hcon = hci_connect_le_scan(hdev, dst, dst_type,
chan->sec_level,
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 658c900752c6..ead4d1baeaa6 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2233,8 +2233,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
else
sec_level = authreq_to_seclevel(auth);
- if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
+ if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) {
+ /* If link is already encrypted with sufficient security we
+ * still need refresh encryption as per Core Spec 5.0 Vol 3,
+ * Part H 2.4.6
+ */
+ smp_ltk_encrypt(conn, hcon->sec_level);
return 0;
+ }
if (sec_level > hcon->pending_sec_level)
hcon->pending_sec_level = sec_level;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ed0dd3340084..8e173324693d 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -504,8 +504,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
return -ELOOP;
- /* Device is already being bridged */
- if (br_port_exists(dev))
+ /* Device has master upper dev */
+ if (netdev_master_upper_dev_get(dev))
return -EBUSY;
/* No bridging devices that dislike that (e.g. wireless) */
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 8bd569695e76..abf711112418 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -230,6 +230,9 @@ static ssize_t brport_show(struct kobject *kobj,
struct brport_attribute *brport_attr = to_brport_attr(attr);
struct net_bridge_port *p = to_brport(kobj);
+ if (!brport_attr->show)
+ return -EINVAL;
+
return brport_attr->show(p, buf);
}
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 9024283d2bca..9adf16258cab 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -172,18 +172,69 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
return true;
}
+static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
+{
+ return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
+}
+
+static bool wormhash_offset_invalid(int off, unsigned int len)
+{
+ if (off == 0) /* not present */
+ return false;
+
+ if (off < (int)sizeof(struct ebt_among_info) ||
+ off % __alignof__(struct ebt_mac_wormhash))
+ return true;
+
+ off += sizeof(struct ebt_mac_wormhash);
+
+ return off > len;
+}
+
+static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
+{
+ if (a == 0)
+ a = sizeof(struct ebt_among_info);
+
+ return ebt_mac_wormhash_size(wh) + a == b;
+}
+
static int ebt_among_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_among_info *info = par->matchinfo;
const struct ebt_entry_match *em =
container_of(par->matchinfo, const struct ebt_entry_match, data);
- int expected_length = sizeof(struct ebt_among_info);
+ unsigned int expected_length = sizeof(struct ebt_among_info);
const struct ebt_mac_wormhash *wh_dst, *wh_src;
int err;
+ if (expected_length > em->match_size)
+ return -EINVAL;
+
+ if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
+ wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
+ return -EINVAL;
+
wh_dst = ebt_among_wh_dst(info);
- wh_src = ebt_among_wh_src(info);
+ if (poolsize_invalid(wh_dst))
+ return -EINVAL;
+
expected_length += ebt_mac_wormhash_size(wh_dst);
+ if (expected_length > em->match_size)
+ return -EINVAL;
+
+ wh_src = ebt_among_wh_src(info);
+ if (poolsize_invalid(wh_src))
+ return -EINVAL;
+
+ if (info->wh_src_ofs < info->wh_dst_ofs) {
+ if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
+ return -EINVAL;
+ } else {
+ if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
+ return -EINVAL;
+ }
+
expected_length += ebt_mac_wormhash_size(wh_src);
if (em->match_size != EBT_ALIGN(expected_length)) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index f5c11bbe27db..5a89a4ac86ef 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2031,7 +2031,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
if (match_kern)
match_kern->match_size = ret;
- WARN_ON(type == EBT_COMPAT_TARGET && size_left);
+ if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+ return -EINVAL;
+
match32 = (struct compat_ebt_entry_mwt *) buf;
}
@@ -2087,6 +2089,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
*
* offsets are relative to beginning of struct ebt_entry (i.e., 0).
*/
+ for (i = 0; i < 4 ; ++i) {
+ if (offsets[i] >= *total)
+ return -EINVAL;
+ if (i == 0)
+ continue;
+ if (offsets[i-1] > offsets[i])
+ return -EINVAL;
+ }
+
for (i = 0, j = 1 ; j < 4 ; j++, i++) {
struct compat_ebt_entry_mwt *match32;
unsigned int size;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 25a30be862e9..98ea28dc03f9 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2512,6 +2512,11 @@ static int try_write(struct ceph_connection *con)
int ret = 1;
dout("try_write start %p state %lu\n", con, con->state);
+ if (con->state != CON_STATE_PREOPEN &&
+ con->state != CON_STATE_CONNECTING &&
+ con->state != CON_STATE_NEGOTIATING &&
+ con->state != CON_STATE_OPEN)
+ return 0;
more:
dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2537,6 +2542,8 @@ more:
}
more_kvec:
+ BUG_ON(!con->sock);
+
/* kvec data queued? */
if (con->out_kvec_left) {
ret = write_partial_kvec(con);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index a8effc8b7280..500481003de4 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -209,6 +209,14 @@ static void reopen_session(struct ceph_mon_client *monc)
__open_session(monc);
}
+static void un_backoff(struct ceph_mon_client *monc)
+{
+ monc->hunt_mult /= 2; /* reduce by 50% */
+ if (monc->hunt_mult < 1)
+ monc->hunt_mult = 1;
+ dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
+}
+
/*
* Reschedule delayed work timer.
*/
@@ -955,6 +963,7 @@ static void delayed_work(struct work_struct *work)
if (!monc->hunting) {
ceph_con_keepalive(&monc->con);
__validate_auth(monc);
+ un_backoff(monc);
}
if (is_auth) {
@@ -1114,9 +1123,8 @@ static void finish_hunting(struct ceph_mon_client *monc)
dout("%s found mon%d\n", __func__, monc->cur_mon);
monc->hunting = false;
monc->had_a_connection = true;
- monc->hunt_mult /= 2; /* reduce by 50% */
- if (monc->hunt_mult < 1)
- monc->hunt_mult = 1;
+ un_backoff(monc);
+ __schedule_delayed(monc);
}
}
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d3f6c26425b3..255c0a075e49 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -295,6 +295,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
u32 yes;
struct crush_rule *r;
+ err = -EINVAL;
ceph_decode_32_safe(p, end, yes, bad);
if (!yes) {
dout("crush_decode NO rule %d off %x %p to %p\n",
diff --git a/net/compat.c b/net/compat.c
index a96fd2f3507b..73671e6ec6eb 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -372,7 +372,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
optname == SO_ATTACH_REUSEPORT_CBPF)
return do_set_attach_filter(sock, level, optname,
optval, optlen);
- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+ if (!COMPAT_USE_64BIT_TIME &&
+ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
return do_set_sock_timeout(sock, level, optname, optval, optlen);
return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -437,7 +438,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+ if (!COMPAT_USE_64BIT_TIME &&
+ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
return do_get_sock_timeout(sock, level, optname, optval, optlen);
return sock_getsockopt(sock, level, optname, optval, optlen);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index c730f12bb0bd..93995575d23a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -995,7 +995,7 @@ bool dev_valid_name(const char *name)
{
if (*name == '\0')
return false;
- if (strlen(name) >= IFNAMSIZ)
+ if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
return false;
if (!strcmp(name, ".") || !strcmp(name, ".."))
return false;
@@ -2205,8 +2205,11 @@ EXPORT_SYMBOL(netif_set_xps_queue);
*/
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
+ bool disabling;
int rc;
+ disabling = txq < dev->real_num_tx_queues;
+
if (txq < 1 || txq > dev->num_tx_queues)
return -EINVAL;
@@ -2222,15 +2225,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (dev->num_tc)
netif_setup_tc(dev, txq);
- if (txq < dev->real_num_tx_queues) {
+ dev->real_num_tx_queues = txq;
+
+ if (disabling) {
+ synchronize_net();
qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(dev, txq);
#endif
}
+ } else {
+ dev->real_num_tx_queues = txq;
}
- dev->real_num_tx_queues = txq;
return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
@@ -2668,7 +2675,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
return 0;
- eth = (struct ethhdr *)skb_mac_header(skb);
+ eth = (struct ethhdr *)skb->data;
type = eth->h_proto;
}
@@ -2872,7 +2879,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
}
EXPORT_SYMBOL(passthru_features_check);
-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
+static netdev_features_t dflt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
@@ -3184,15 +3191,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
- struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+ const struct netprio_map *map;
+ const struct sock *sk;
+ unsigned int prioidx;
+
+ if (skb->priority)
+ return;
+ map = rcu_dereference_bh(skb->dev->priomap);
+ if (!map)
+ return;
+ sk = skb_to_full_sk(skb);
+ if (!sk)
+ return;
- if (!skb->priority && skb->sk && map) {
- unsigned int prioidx =
- sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
+ prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
- if (prioidx < map->priomap_len)
- skb->priority = map->priomap[prioidx];
- }
+ if (prioidx < map->priomap_len)
+ skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index c0548d268e1a..e3e6a3e2ca22 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -57,8 +57,8 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
return -EINVAL;
list_for_each_entry(ha, &list->list, list) {
- if (!memcmp(ha->addr, addr, addr_len) &&
- ha->type == addr_type) {
+ if (ha->type == addr_type &&
+ !memcmp(ha->addr, addr, addr_len)) {
if (global) {
/* check if addr is already used as global */
if (ha->global_use)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 7b315663f840..128c811dcb1a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -54,7 +54,8 @@ do { \
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+ struct net_device *dev);
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
@@ -254,8 +255,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev);
- pneigh_ifdown(tbl, dev);
- write_unlock_bh(&tbl->lock);
+ pneigh_ifdown_and_unlock(tbl, dev);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
@@ -645,9 +645,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
return -ENOENT;
}
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+ struct net_device *dev)
{
- struct pneigh_entry *n, **np;
+ struct pneigh_entry *n, **np, *freelist = NULL;
u32 h;
for (h = 0; h <= PNEIGH_HASHMASK; h++) {
@@ -655,16 +656,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
while ((n = *np) != NULL) {
if (!dev || n->dev == dev) {
*np = n->next;
- if (tbl->pdestructor)
- tbl->pdestructor(n);
- if (n->dev)
- dev_put(n->dev);
- kfree(n);
+ n->next = freelist;
+ freelist = n;
continue;
}
np = &n->next;
}
}
+ write_unlock_bh(&tbl->lock);
+ while ((n = freelist)) {
+ freelist = n->next;
+ n->next = NULL;
+ if (tbl->pdestructor)
+ tbl->pdestructor(n);
+ if (n->dev)
+ dev_put(n->dev);
+ kfree(n);
+ }
return -ENOENT;
}
@@ -1130,10 +1138,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
lladdr = neigh->ha;
}
- if (new & NUD_CONNECTED)
- neigh->confirmed = jiffies;
- neigh->updated = jiffies;
-
/* If entry was valid and address is not changed,
do not change entry state, if new one is STALE.
*/
@@ -1155,6 +1159,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
}
}
+ /* Update timestamps only once we know we will make a change to the
+ * neighbour entry. Otherwise we risk to move the locktime window with
+ * noop updates and ignore relevant ARP updates.
+ */
+ if (new != old || lladdr != neigh->ha) {
+ if (new & NUD_CONNECTED)
+ neigh->confirmed = jiffies;
+ neigh->updated = jiffies;
+ }
+
if (new != old) {
neigh_del_timer(neigh);
if (new & NUD_PROBE)
@@ -2273,12 +2287,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
if (!err) {
- if (tb[NDA_IFINDEX])
+ if (tb[NDA_IFINDEX]) {
+ if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
+ return -EINVAL;
filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
-
- if (tb[NDA_MASTER])
+ }
+ if (tb[NDA_MASTER]) {
+ if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
+ return -EINVAL;
filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
-
+ }
if (filter_idx || filter_master_idx)
flags |= NLM_F_DUMP_FILTERED;
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b7efe2f19f83..04fd04ccaa04 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -312,6 +312,25 @@ out_undo:
goto out;
}
+static int __net_init net_defaults_init_net(struct net *net)
+{
+ net->core.sysctl_somaxconn = SOMAXCONN;
+ return 0;
+}
+
+static struct pernet_operations net_defaults_ops = {
+ .init = net_defaults_init_net,
+};
+
+static __init int net_defaults_init(void)
+{
+ if (register_pernet_subsys(&net_defaults_ops))
+ panic("Cannot initialize net default settings");
+
+ return 0;
+}
+
+core_initcall(net_defaults_init);
#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index fec448d29f42..a784cb9374dc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -918,6 +918,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
n->cloned = 1;
n->nohdr = 0;
+ n->peeked = 0;
n->destructor = NULL;
C(tail);
C(end);
@@ -2630,7 +2631,8 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
int pos = skb_headlen(skb);
- skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+ skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
+ SKBTX_SHARED_FRAG;
if (len < pos) /* Split line is inside header. */
skb_split_inside_header(skb, skb1, len, pos);
else /* Second chunk has no header, nothing to copy. */
@@ -3243,8 +3245,8 @@ normal:
skb_copy_from_linear_data_offset(head_skb, offset,
skb_put(nskb, hsize), hsize);
- skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
- SKBTX_SHARED_FRAG;
+ skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
+ SKBTX_SHARED_FRAG;
while (pos < offset + len) {
if (i >= nfrags) {
@@ -3490,24 +3492,18 @@ void __init skb_init(void)
NULL);
}
-/**
- * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
- * @skb: Socket buffer containing the buffers to be mapped
- * @sg: The scatter-gather list to map into
- * @offset: The offset into the buffer's contents to start mapping
- * @len: Length of buffer space to be mapped
- *
- * Fill the specified scatter-gather list with mappings/pointers into a
- * region of the buffer space attached to a socket buffer.
- */
static int
-__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
+ unsigned int recursion_level)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int elt = 0;
+ if (unlikely(recursion_level >= 24))
+ return -EMSGSIZE;
+
if (copy > 0) {
if (copy > len)
copy = len;
@@ -3526,6 +3522,8 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+ return -EMSGSIZE;
if (copy > len)
copy = len;
@@ -3540,16 +3538,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
}
skb_walk_frags(skb, frag_iter) {
- int end;
+ int end, ret;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
+ if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+ return -EMSGSIZE;
+
if (copy > len)
copy = len;
- elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
- copy);
+ ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+ copy, recursion_level + 1);
+ if (unlikely(ret < 0))
+ return ret;
+ elt += ret;
if ((len -= copy) == 0)
return elt;
offset += copy;
@@ -3560,6 +3564,31 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
return elt;
}
+/**
+ * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+ * @skb: Socket buffer containing the buffers to be mapped
+ * @sg: The scatter-gather list to map into
+ * @offset: The offset into the buffer's contents to start mapping
+ * @len: Length of buffer space to be mapped
+ *
+ * Fill the specified scatter-gather list with mappings/pointers into a
+ * region of the buffer space attached to a socket buffer. Returns either
+ * the number of scatterlist items used, or -EMSGSIZE if the contents
+ * could not fit.
+ */
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+ int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
+
+ if (nsg <= 0)
+ return nsg;
+
+ sg_mark_end(&sg[nsg - 1]);
+
+ return nsg;
+}
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
+
/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
* sglist without mark the sg which contain last skb data as the end.
* So the caller can mannipulate sg list as will when padding new data after
@@ -3582,19 +3611,11 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len)
{
- return __skb_to_sgvec(skb, sg, offset, len);
+ return __skb_to_sgvec(skb, sg, offset, len, 0);
}
EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
-{
- int nsg = __skb_to_sgvec(skb, sg, offset, len);
-
- sg_mark_end(&sg[nsg - 1]);
- return nsg;
-}
-EXPORT_SYMBOL_GPL(skb_to_sgvec);
/**
* skb_cow_data - Check that a socket buffer's data buffers are writable
@@ -3732,7 +3753,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk);
+ sk->sk_error_report(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
@@ -3877,7 +3898,8 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
return;
if (tsonly) {
- skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
+ skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
+ SKBTX_ANY_TSTAMP;
skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 1b4619008c4e..546ba76b35a5 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -438,8 +438,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
{
struct ctl_table *tbl;
- net->core.sysctl_somaxconn = SOMAXCONN;
-
tbl = netns_core_table;
if (!net_eq(net, &init_net)) {
tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 7753681195c1..86a2ed0fb219 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
DCCPF_SEQ_WMAX));
}
+static void dccp_tasklet_schedule(struct sock *sk)
+{
+ struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
+
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ sock_hold(sk);
+ __tasklet_schedule(t);
+ }
+}
+
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
struct sock *sk = (struct sock *)data;
@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
/* if we were blocked before, we may now send cwnd=1 packet */
if (sender_was_blocked)
- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ dccp_tasklet_schedule(sk);
/* restart backed-off timer */
sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
done:
/* check if incoming Acks allow pending packets to be sent */
if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ dccp_tasklet_schedule(sk);
dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8c7799cdd3cf..6697b180e122 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -620,6 +620,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
ireq = inet_rsk(req);
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+ ireq->ir_mark = inet_request_mark(sk, skb);
ireq->ireq_family = AF_INET;
ireq->ir_iif = sk->sk_bound_dev_if;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 28e8252cc5ea..6cbcf399d22b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -349,6 +349,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
ireq->ireq_family = AF_INET6;
+ ireq->ir_mark = inet_request_mark(sk, skb);
if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 9d43c1f40274..ff3b058cf58c 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -789,6 +789,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (skb == NULL)
goto out_release;
+ if (sk->sk_state == DCCP_CLOSED) {
+ rc = -ENOTCONN;
+ goto out_discard;
+ }
+
skb_reserve(skb, sk->sk_prot->max_header);
rc = memcpy_from_msg(skb_put(skb, len), msg, len);
if (rc != 0)
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 3a2c34027758..2a952cbd6efa 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -230,12 +230,12 @@ static void dccp_write_xmitlet(unsigned long data)
else
dccp_write_xmit(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
}
static void dccp_write_xmit_timer(unsigned long data)
{
dccp_write_xmitlet(data);
- sock_put((struct sock *)data);
}
void dccp_init_xmit_timers(struct sock *sk)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index e1d4d898a007..f0252768ecf4 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -25,6 +25,7 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/ratelimit.h>
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
@@ -91,9 +92,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
next_opt = memchr(opt, '#', end - opt) ?: end;
opt_len = next_opt - opt;
- if (!opt_len) {
- printk(KERN_WARNING
- "Empty option to dns_resolver key\n");
+ if (opt_len <= 0 || opt_len > 128) {
+ pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
+ opt_len);
return -EINVAL;
}
@@ -127,10 +128,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
}
bad_option_value:
- printk(KERN_WARNING
- "Option '%*.*s' to dns_resolver key:"
- " bad/missing value\n",
- opt_nlen, opt_nlen, opt);
+ pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
+ opt_nlen, opt_nlen, opt);
return -EINVAL;
} while (opt = next_opt + 1, opt < end);
}
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 4ebe2aa3e7d3..04b5450c5a55 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -324,8 +324,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame,
unsigned long irqflags;
frame->is_supervision = is_supervision_frame(port->hsr, skb);
- frame->node_src = hsr_get_node(&port->hsr->node_db, skb,
- frame->is_supervision);
+ frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
if (frame->node_src == NULL)
return -1; /* Unknown node and !is_supervision, or no mem */
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 7ea925816f79..284a9b820df8 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -158,9 +158,10 @@ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
/* Get the hsr_node from which 'skb' was sent.
*/
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
bool is_sup)
{
+ struct list_head *node_db = &port->hsr->node_db;
struct hsr_node *node;
struct ethhdr *ethhdr;
u16 seq_out;
@@ -186,7 +187,11 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
*/
seq_out = hsr_get_skb_sequence_nr(skb) - 1;
} else {
- WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
+ /* this is called also for frames from master port and
+ * so warn only for non master ports
+ */
+ if (port->type != HSR_PT_MASTER)
+ WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
seq_out = HSR_SEQNR_START;
}
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 438b40f98f5a..4e04f0e868e9 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -18,7 +18,7 @@ struct hsr_node;
struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
u16 seq_out);
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
bool is_sup);
void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
struct hsr_port *port);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index d7efbf0dad20..83af5339e582 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -204,9 +204,13 @@ static inline void lowpan_netlink_fini(void)
static int lowpan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct wpan_dev *wpan_dev;
- if (wdev->type != ARPHRD_IEEE802154)
+ if (ndev->type != ARPHRD_IEEE802154)
+ return NOTIFY_DONE;
+ wpan_dev = ndev->ieee802154_ptr;
+ if (!wpan_dev)
return NOTIFY_DONE;
switch (event) {
@@ -215,8 +219,8 @@ static int lowpan_device_event(struct notifier_block *unused,
* also delete possible lowpan interfaces which belongs
* to the wpan interface.
*/
- if (wdev->ieee802154_ptr->lowpan_dev)
- lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
+ if (wpan_dev->lowpan_dev)
+ lowpan_dellink(wpan_dev->lowpan_dev, NULL);
break;
default:
return NOTIFY_DONE;
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index e0bd013a1e5e..bf5d26d83af0 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -304,12 +304,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);
- dev_put(dev);
-
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
+ dev_put(dev);
+
return err ?: size;
out_skb:
@@ -693,12 +693,12 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);
- dev_put(dev);
-
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
+ dev_put(dev);
+
return err ?: size;
out_skb:
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 22377c8ff14b..e8f862358518 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -220,7 +220,9 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
@@ -393,7 +395,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
skb_push(skb, ihl);
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index e60517eb1c3a..8cae791a7ace 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -437,7 +437,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
/*unsigned long now; */
struct net *net = dev_net(dev);
- rt = ip_route_output(net, sip, tip, 0, 0);
+ rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
if (IS_ERR(rt))
return 1;
if (rt->dst.dev != dev) {
@@ -658,6 +658,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
unsigned char *arp_ptr;
struct rtable *rt;
unsigned char *sha;
+ unsigned char *tha = NULL;
__be32 sip, tip;
u16 dev_type = dev->type;
int addr_type;
@@ -729,6 +730,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
break;
#endif
default:
+ tha = arp_ptr;
arp_ptr += dev->addr_len;
}
memcpy(&tip, arp_ptr, 4);
@@ -847,8 +849,18 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
It is possible, that this option should be enabled for some
devices (strip is candidate)
*/
- is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
- addr_type == RTN_UNICAST;
+ is_garp = tip == sip && addr_type == RTN_UNICAST;
+
+ /* Unsolicited ARP _replies_ also require target hwaddr to be
+ * the same as source.
+ */
+ if (is_garp && arp->ar_op == htons(ARPOP_REPLY))
+ is_garp =
+ /* IPv4 over IEEE 1394 doesn't provide target
+ * hardware address field in its ARP payload.
+ */
+ tha &&
+ !memcmp(tha, sha, dev->addr_len);
if (!n &&
((arp->ar_op == htons(ARPOP_REPLY) &&
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 20fb25e3027b..3d8021d55336 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -268,10 +268,11 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
esph->spi = x->id.spi;
sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg,
- (unsigned char *)esph - skb->data,
- assoclen + ivlen + clen + alen);
-
+ err = skb_to_sgvec(skb, sg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + clen + alen);
+ if (unlikely(err < 0))
+ goto error;
aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
aead_request_set_ad(req, assoclen);
@@ -481,7 +482,9 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
}
sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ err = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out;
aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
aead_request_set_ad(req, assoclen);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 38c1c979ecb1..e1be24416c0e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -640,6 +640,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
fi->fib_nh, cfg))
return 1;
}
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ if (cfg->fc_flow &&
+ cfg->fc_flow != fi->fib_nh->nh_tclassid)
+ return 1;
+#endif
if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
(!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
return 0;
@@ -1606,18 +1611,20 @@ void fib_select_multipath(struct fib_result *res, int hash)
bool first = false;
for_nexthops(fi) {
+ if (net->ipv4.sysctl_fib_multipath_use_neigh) {
+ if (!fib_good_nh(nh))
+ continue;
+ if (!first) {
+ res->nh_sel = nhsel;
+ first = true;
+ }
+ }
+
if (hash > atomic_read(&nh->nh_upper_bound))
continue;
- if (!net->ipv4.sysctl_fib_multipath_use_neigh ||
- fib_good_nh(nh)) {
- res->nh_sel = nhsel;
- return;
- }
- if (!first) {
- res->nh_sel = nhsel;
- first = true;
- }
+ res->nh_sel = nhsel;
+ return;
} endfor_nexthops(fi);
}
#endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 631c0d0d7cf8..8effac0f2219 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -119,6 +119,9 @@ out:
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
+ if (!hlist_unhashed(&q->list_evictor))
+ return false;
+
return q->net->low_thresh == 0 ||
frag_mem_limit(q->net) >= q->net->low_thresh;
}
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ddcd56c08d14..a6b34ac3139e 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -182,6 +182,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
tw->tw_dport = inet->inet_dport;
tw->tw_family = sk->sk_family;
tw->tw_reuse = sk->sk_reuse;
+ tw->tw_reuseport = sk->sk_reuseport;
tw->tw_hash = sk->sk_hash;
tw->tw_ipv6only = 0;
tw->tw_transparent = inet->transparent;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index bf62fa487262..5ddd64995e73 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -242,7 +242,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
return -EINVAL;
- ipc->oif = src_info->ipi6_ifindex;
+ if (src_info->ipi6_ifindex)
+ ipc->oif = src_info->ipi6_ifindex;
ipc->addr = src_info->ipi6_addr.s6_addr32[3];
continue;
}
@@ -272,7 +273,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
return -EINVAL;
info = (struct in_pktinfo *)CMSG_DATA(cmsg);
- ipc->oif = info->ipi_ifindex;
+ if (info->ipi_ifindex)
+ ipc->oif = info->ipi_ifindex;
ipc->addr = info->ipi_spec_dst.s_addr;
break;
}
@@ -1552,10 +1554,7 @@ int ip_getsockopt(struct sock *sk, int level,
if (get_user(len, optlen))
return -EFAULT;
- lock_sock(sk);
- err = nf_getsockopt(sk, PF_INET, optname, optval,
- &len);
- release_sock(sk);
+ err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
if (err >= 0)
err = put_user(len, optlen);
return err;
@@ -1587,9 +1586,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- lock_sock(sk);
err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
- release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
return err;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 96536a0d6e2d..e1271e75e107 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -253,13 +253,14 @@ static struct net_device *__ip_tunnel_create(struct net *net,
struct net_device *dev;
char name[IFNAMSIZ];
- if (parms->name[0])
+ err = -E2BIG;
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ goto failed;
strlcpy(name, parms->name, IFNAMSIZ);
- else {
- if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
- err = -E2BIG;
+ } else {
+ if (strlen(ops->kind) > (IFNAMSIZ - 3))
goto failed;
- }
strlcpy(name, ops->kind, IFNAMSIZ);
strncat(name, "%d", 2);
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 27089f5ebbb1..742a3432c3ea 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1929,6 +1929,20 @@ int ip_mr_input(struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
struct mr_table *mrt;
+ struct net_device *dev;
+
+ /* skb->dev passed in is the loX master dev for vrfs.
+ * As there are no vifs associated with loopback devices,
+ * get the proper interface that does have a vif associated with it.
+ */
+ dev = skb->dev;
+ if (netif_is_l3_master(skb->dev)) {
+ dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
+ if (!dev) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+ }
/* Packet is looped back after forward, it should not be
* forwarded second time, but still can be delivered locally.
@@ -1966,7 +1980,7 @@ int ip_mr_input(struct sk_buff *skb)
/* already under rcu_read_lock() */
cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
if (!cache) {
- int vif = ipmr_find_vif(mrt, skb->dev);
+ int vif = ipmr_find_vif(mrt, dev);
if (vif >= 0)
cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
@@ -1986,7 +2000,7 @@ int ip_mr_input(struct sk_buff *skb)
}
read_lock(&mrt_lock);
- vif = ipmr_find_vif(mrt, skb->dev);
+ vif = ipmr_find_vif(mrt, dev);
if (vif >= 0) {
int err2 = ipmr_cache_unresolved(mrt, vif, skb);
read_unlock(&mrt_lock);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 697538464e6e..d35815e5967b 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -261,6 +261,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
}
if (table_base + v
!= arpt_next_entry(e)) {
+ if (unlikely(stackidx >= private->stacksize)) {
+ verdict = NF_DROP;
+ break;
+ }
jumpstack[stackidx++] = e;
}
@@ -415,17 +419,15 @@ static inline int check_target(struct arpt_entry *e, const char *name)
}
static inline int
-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
+find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
+ struct xt_percpu_counter_alloc_state *alloc_state)
{
struct xt_entry_target *t;
struct xt_target *target;
- unsigned long pcnt;
int ret;
- pcnt = xt_percpu_counter_alloc();
- if (IS_ERR_VALUE(pcnt))
+ if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
return -ENOMEM;
- e->counters.pcnt = pcnt;
t = arpt_get_target(e);
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
@@ -443,7 +445,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
err:
module_put(t->u.kernel.target->me);
out:
- xt_percpu_counter_free(e->counters.pcnt);
+ xt_percpu_counter_free(&e->counters);
return ret;
}
@@ -523,7 +525,7 @@ static inline void cleanup_entry(struct arpt_entry *e)
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
- xt_percpu_counter_free(e->counters.pcnt);
+ xt_percpu_counter_free(&e->counters);
}
/* Checks and translates the user-supplied table segment (held in
@@ -532,6 +534,7 @@ static inline void cleanup_entry(struct arpt_entry *e)
static int translate_table(struct xt_table_info *newinfo, void *entry0,
const struct arpt_replace *repl)
{
+ struct xt_percpu_counter_alloc_state alloc_state = { 0 };
struct arpt_entry *iter;
unsigned int *offsets;
unsigned int i;
@@ -594,7 +597,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
/* Finally, each sanity check must pass */
i = 0;
xt_entry_foreach(iter, entry0, newinfo->size) {
- ret = find_check_entry(iter, repl->name, repl->size);
+ ret = find_check_entry(iter, repl->name, repl->size,
+ &alloc_state);
if (ret != 0)
break;
++i;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 7c00ce90adb8..e78f6521823f 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -345,8 +345,13 @@ ipt_do_table(struct sk_buff *skb,
continue;
}
if (table_base + v != ipt_next_entry(e) &&
- !(e->ip.flags & IPT_F_GOTO))
+ !(e->ip.flags & IPT_F_GOTO)) {
+ if (unlikely(stackidx >= private->stacksize)) {
+ verdict = NF_DROP;
+ break;
+ }
jumpstack[stackidx++] = e;
+ }
e = get_entry(table_base, v);
continue;
@@ -535,7 +540,8 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name)
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
- unsigned int size)
+ unsigned int size,
+ struct xt_percpu_counter_alloc_state *alloc_state)
{
struct xt_entry_target *t;
struct xt_target *target;
@@ -543,12 +549,9 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
unsigned int j;
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
- unsigned long pcnt;
- pcnt = xt_percpu_counter_alloc();
- if (IS_ERR_VALUE(pcnt))
+ if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
return -ENOMEM;
- e->counters.pcnt = pcnt;
j = 0;
mtpar.net = net;
@@ -586,7 +589,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
cleanup_match(ematch, net);
}
- xt_percpu_counter_free(e->counters.pcnt);
+ xt_percpu_counter_free(&e->counters);
return ret;
}
@@ -674,7 +677,7 @@ cleanup_entry(struct ipt_entry *e, struct net *net)
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
- xt_percpu_counter_free(e->counters.pcnt);
+ xt_percpu_counter_free(&e->counters);
}
/* Checks and translates the user-supplied table segment (held in
@@ -683,6 +686,7 @@ static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
const struct ipt_replace *repl)
{
+ struct xt_percpu_counter_alloc_state alloc_state = { 0 };
struct ipt_entry *iter;
unsigned int *offsets;
unsigned int i;
@@ -742,7 +746,8 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
/* Finally, each sanity check must pass */
i = 0;
xt_entry_foreach(iter, entry0, newinfo->size) {
- ret = find_check_entry(iter, net, repl->name, repl->size);
+ ret = find_check_entry(iter, net, repl->name, repl->size,
+ &alloc_state);
if (ret != 0)
break;
++i;
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 574f7ebba0b6..ac8342dcb55e 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -252,16 +252,16 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
if (set_h245_addr(skb, protoff, data, dataoff, taddr,
&ct->tuplehash[!dir].tuple.dst.u3,
htons((port & htons(1)) ? nated_port + 1 :
- nated_port)) == 0) {
- /* Save ports */
- info->rtp_port[i][dir] = rtp_port;
- info->rtp_port[i][!dir] = htons(nated_port);
- } else {
+ nated_port))) {
nf_ct_unexpect_related(rtp_exp);
nf_ct_unexpect_related(rtcp_exp);
return -1;
}
+ /* Save ports */
+ info->rtp_port[i][dir] = rtp_port;
+ info->rtp_port[i][!dir] = htons(nated_port);
+
/* Success */
pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n",
&rtp_exp->tuple.src.u3.ip,
@@ -370,15 +370,15 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
/* Modify signal */
if (set_h225_addr(skb, protoff, data, dataoff, taddr,
&ct->tuplehash[!dir].tuple.dst.u3,
- htons(nated_port)) == 0) {
- /* Save ports */
- info->sig_port[dir] = port;
- info->sig_port[!dir] = htons(nated_port);
- } else {
+ htons(nated_port))) {
nf_ct_unexpect_related(exp);
return -1;
}
+ /* Save ports */
+ info->sig_port[dir] = port;
+ info->sig_port[!dir] = htons(nated_port);
+
pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n",
&exp->tuple.src.u3.ip,
ntohs(exp->tuple.src.u.tcp.port),
@@ -462,24 +462,27 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
/* Modify signal */
if (set_h225_addr(skb, protoff, data, 0, &taddr[idx],
&ct->tuplehash[!dir].tuple.dst.u3,
- htons(nated_port)) == 0) {
- /* Save ports */
- info->sig_port[dir] = port;
- info->sig_port[!dir] = htons(nated_port);
-
- /* Fix for Gnomemeeting */
- if (idx > 0 &&
- get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
- (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
- set_h225_addr(skb, protoff, data, 0, &taddr[0],
- &ct->tuplehash[!dir].tuple.dst.u3,
- info->sig_port[!dir]);
- }
- } else {
+ htons(nated_port))) {
nf_ct_unexpect_related(exp);
return -1;
}
+ /* Save ports */
+ info->sig_port[dir] = port;
+ info->sig_port[!dir] = htons(nated_port);
+
+ /* Fix for Gnomemeeting */
+ if (idx > 0 &&
+ get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
+ (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
+ if (set_h225_addr(skb, protoff, data, 0, &taddr[0],
+ &ct->tuplehash[!dir].tuple.dst.u3,
+ info->sig_port[!dir])) {
+ nf_ct_unexpect_related(exp);
+ return -1;
+ }
+ }
+
/* Success */
pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n",
&exp->tuple.src.u3.ip,
@@ -550,9 +553,9 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
}
/* Modify signal */
- if (!set_h225_addr(skb, protoff, data, dataoff, taddr,
- &ct->tuplehash[!dir].tuple.dst.u3,
- htons(nated_port)) == 0) {
+ if (set_h225_addr(skb, protoff, data, dataoff, taddr,
+ &ct->tuplehash[!dir].tuple.dst.u3,
+ htons(nated_port))) {
nf_ct_unexpect_related(exp);
return -1;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index e612991c9185..9adcd4b1b3fd 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -775,8 +775,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc.addr = faddr = daddr;
if (ipc.opt && ipc.opt->opt.srr) {
- if (!daddr)
- return -EINVAL;
+ if (!daddr) {
+ err = -EINVAL;
+ goto out_free;
+ }
faddr = ipc.opt->opt.faddr;
}
tos = get_rttos(&ipc, inet);
@@ -841,6 +843,7 @@ back_from_confirm:
out:
ip_rt_put(rt);
+out_free:
if (free)
kfree(ipc.opt);
if (!err) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7ac319222558..4c9fbf4f5905 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -126,10 +126,13 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
-static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
+static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+
+static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
+
/*
* Interface to generic destination cache.
*/
@@ -2772,7 +2775,8 @@ static struct ctl_table ipv4_route_table[] = {
.data = &ip_rt_min_pmtu,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &ip_min_valid_pmtu,
},
{
.procname = "min_adv_mss",
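The route.c hunk above switches net.ipv4.route.min_pmtu from proc_dointvec to proc_dointvec_minmax with .extra1 pointing at ip_min_valid_pmtu, so writes below IPV4_MIN_MTU (68) are rejected instead of silently accepted. A tiny sketch of the same floor check, with illustrative names only:

#include <errno.h>

#define MIN_VALID_PMTU 68		/* IPV4_MIN_MTU */

static unsigned int min_pmtu = 512 + 20 + 20;

int set_min_pmtu(long val)
{
	if (val < MIN_VALID_PMTU)
		return -EINVAL;		/* out of range, as the minmax handler reports */
	min_pmtu = (unsigned int)val;
	return 0;
}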
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0d1a767db1bb..84ffebf0192d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1118,7 +1118,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
lock_sock(sk);
flags = msg->msg_flags;
- if (flags & MSG_FASTOPEN) {
+ if ((flags & MSG_FASTOPEN) && !tp->repair) {
err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
if (err == -EINPROGRESS && copied_syn > 0)
goto out;
@@ -2523,7 +2523,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_REPAIR_QUEUE:
if (!tp->repair)
err = -EPERM;
- else if (val < TCP_QUEUES_NR)
+ else if ((unsigned int)val < TCP_QUEUES_NR)
tp->repair_queue = val;
else
err = -EINVAL;
@@ -2662,8 +2662,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
- /* Read the IP->Key mappings from userspace */
- err = tp->af_specific->md5_parse(sk, optval, optlen);
+ if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
+ err = tp->af_specific->md5_parse(sk, optval, optlen);
+ else
+ err = -EINVAL;
break;
#endif
case TCP_USER_TIMEOUT:
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 8ec60532be2b..9169859506b7 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -773,7 +773,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
}
}
}
- bbr->idle_restart = 0;
+ /* Restart after idle ends only once we process a new S/ACK for data */
+ if (rs->delivered > 0)
+ bbr->idle_restart = 0;
}
static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2f107e46355c..52b0a84be765 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -115,6 +115,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
+#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -3618,7 +3619,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (before(ack, prior_snd_una)) {
/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
if (before(ack, prior_snd_una - tp->max_window)) {
- tcp_send_challenge_ack(sk, skb);
+ if (!(flag & FLAG_NO_CHALLENGE_ACK))
+ tcp_send_challenge_ack(sk, skb);
return -1;
}
goto old_ack;
@@ -3941,11 +3943,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
int length = (th->doff << 2) - sizeof(*th);
const u8 *ptr = (const u8 *)(th + 1);
- /* If the TCP option is too short, we can short cut */
- if (length < TCPOLEN_MD5SIG)
- return NULL;
-
- while (length > 0) {
+ /* If not enough data remaining, we can short cut */
+ while (length >= TCPOLEN_MD5SIG) {
int opcode = *ptr++;
int opsize;
@@ -5606,10 +5605,6 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
else
tp->pred_flags = 0;
- if (!sock_flag(sk, SOCK_DEAD)) {
- sk->sk_state_change(sk);
- sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
- }
}
static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
@@ -5678,6 +5673,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_fastopen_cookie foc = { .len = -1 };
int saved_clamp = tp->rx_opt.mss_clamp;
+ bool fastopen_fail;
tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
@@ -5781,10 +5777,15 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_finish_connect(sk, skb);
- if ((tp->syn_fastopen || tp->syn_data) &&
- tcp_rcv_fastopen_synack(sk, skb, &foc))
- return -1;
+ fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
+ tcp_rcv_fastopen_synack(sk, skb, &foc);
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ sk->sk_state_change(sk);
+ sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+ }
+ if (fastopen_fail)
+ return -1;
if (sk->sk_write_pending ||
icsk->icsk_accept_queue.rskq_defer_accept ||
icsk->icsk_ack.pingpong) {
@@ -5967,13 +5968,17 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
/* step 5: check the ACK field */
acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
- FLAG_UPDATE_TS_RECENT) > 0;
+ FLAG_UPDATE_TS_RECENT |
+ FLAG_NO_CHALLENGE_ACK) > 0;
+ if (!acceptable) {
+ if (sk->sk_state == TCP_SYN_RECV)
+ return 1; /* send one RST */
+ tcp_send_challenge_ack(sk, skb);
+ goto discard;
+ }
switch (sk->sk_state) {
case TCP_SYN_RECV:
- if (!acceptable)
- return 1;
-
if (!tp->srtt_us)
tcp_synack_rtt_meas(sk, req);
@@ -6043,14 +6048,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
* our SYNACK so stop the SYNACK timer.
*/
if (req) {
- /* Return RST if ack_seq is invalid.
- * Note that RFC793 only says to generate a
- * DUPACK for it but for TCP Fast Open it seems
- * better to treat this case like TCP_SYN_RECV
- * above.
- */
- if (!acceptable)
- return 1;
/* We no longer need the request sock. */
reqsk_fastopen_remove(sk, req, false);
tcp_rearm_rto(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3d7b59ecc76c..a69606031e5f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1580,7 +1580,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
*/
segs = max_t(u32, bytes / mss_now, min_tso_segs);
- return min_t(u32, segs, sk->sk_gso_max_segs);
+ return segs;
}
EXPORT_SYMBOL(tcp_tso_autosize);
@@ -1592,8 +1592,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
- return tso_segs ? :
- tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs);
+ if (!tso_segs)
+ tso_segs = tcp_tso_autosize(sk, mss_now,
+ sysctl_tcp_min_tso_segs);
+ return min_t(u32, tso_segs, sk->sk_gso_max_segs);
}
/* Returns the portion of skb which can be sent right away */
@@ -1907,6 +1909,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
}
}
+static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+{
+ struct sk_buff *skb, *next;
+
+ skb = tcp_send_head(sk);
+ tcp_for_write_queue_from_safe(skb, next, sk) {
+ if (len <= skb->len)
+ break;
+
+ if (unlikely(TCP_SKB_CB(skb)->eor))
+ return false;
+
+ len -= skb->len;
+ }
+
+ return true;
+}
+
/* Create a new MTU probe if we are ready.
* MTU probe is regularly attempting to increase the path MTU by
* deliberately sending larger packets. This discovers routing
@@ -1979,6 +1999,9 @@ static int tcp_mtu_probe(struct sock *sk)
return 0;
}
+ if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
+ return -1;
+
/* We're allowed to probe. Build it now. */
nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
if (!nskb)
@@ -2014,6 +2037,10 @@ static int tcp_mtu_probe(struct sock *sk)
/* We've eaten all the data from this skb.
* Throw it away. */
TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+ /* If this is the last SKB we copy and eor is set
+ * we need to propagate it to the new skb.
+ */
+ TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
} else {
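The new tcp_can_coalesce_send_queue_head() helper above walks the write queue from the send head and refuses an MTU probe that would have to merge data across a buffer marked EOR (end of record). A userspace sketch of the same walk over a singly linked list; the field names are illustrative, not the kernel structures:

#include <stdbool.h>
#include <stddef.h>

struct buf {
	struct buf *next;
	int len;
	bool eor;	/* end-of-record: must not be merged with the next buf */
};

bool can_coalesce_head(struct buf *head, int len)
{
	struct buf *b;

	for (b = head; b; b = b->next) {
		if (len <= b->len)
			return true;	/* probe is covered by buffers seen so far */
		if (b->eor)
			return false;	/* would merge across a record boundary */
		len -= b->len;
	}
	return true;			/* short queue: caller checks data length separately */
}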
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index bef4a94ce1a0..aa2a20e918fd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -982,8 +982,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
if (ipc.opt && ipc.opt->opt.srr) {
- if (!daddr)
- return -EINVAL;
+ if (!daddr) {
+ err = -EINVAL;
+ goto out_free;
+ }
faddr = ipc.opt->opt.faddr;
connected = 0;
}
@@ -1090,6 +1092,7 @@ do_append_data:
out:
ip_rt_put(rt);
+out_free:
if (free)
kfree(ipc.opt);
if (!err)
@@ -1713,6 +1716,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
err = udplite_checksum_init(skb, uh);
if (err)
return err;
+
+ if (UDP_SKB_CB(skb)->partial_cov) {
+ skb->csum = inet_compute_pseudo(skb, proto);
+ return 0;
+ }
}
/* Note, we are only interested in != 0 or == 0, thus the
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 1594d9fc9c92..3a27cf762da1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -988,7 +988,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
INIT_HLIST_NODE(&ifa->addr_lst);
ifa->scope = scope;
ifa->prefix_len = pfxlen;
- ifa->flags = flags | IFA_F_TENTATIVE;
+ ifa->flags = flags;
+ /* No need to add the TENTATIVE flag for addresses with NODAD */
+ if (!(flags & IFA_F_NODAD))
+ ifa->flags |= IFA_F_TENTATIVE;
ifa->valid_lft = valid_lft;
ifa->prefered_lft = prefered_lft;
ifa->cstamp = ifa->tstamp = jiffies;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 0630a4d5daaa..0edc44cb254e 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -423,7 +423,9 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
@@ -603,7 +605,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
ip6h->hop_limit = 0;
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 111ba55fd512..6a924be66e37 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -248,9 +248,11 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
esph->spi = x->id.spi;
sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg,
- (unsigned char *)esph - skb->data,
- assoclen + ivlen + clen + alen);
+ err = skb_to_sgvec(skb, sg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + clen + alen);
+ if (unlikely(err < 0))
+ goto error;
aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
aead_request_set_ad(req, assoclen);
@@ -423,7 +425,9 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
}
sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(ret < 0))
+ goto out;
aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
aead_request_set_ad(req, assoclen);
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index c0cbcb259f5a..1dc023ca98fd 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -72,6 +72,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
err = udplite_checksum_init(skb, uh);
if (err)
return err;
+
+ if (UDP_SKB_CB(skb)->partial_cov) {
+ skb->csum = ip6_compute_pseudo(skb, proto);
+ return 0;
+ }
}
/* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index db2613b4a049..caee5530ae2c 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -319,11 +319,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
if (t || !create)
return t;
- if (parms->name[0])
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ return NULL;
strlcpy(name, parms->name, IFNAMSIZ);
- else
+ } else {
strcpy(name, "ip6gre%d");
-
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ip6gre_tunnel_setup);
if (!dev)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2e3db3619858..58a6eeeacbf7 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -356,6 +356,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ struct dst_entry *dst = skb_dst(skb);
+
+ __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+ __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+
return dst_output(net, sk, skb);
}
@@ -549,8 +554,6 @@ int ip6_forward(struct sk_buff *skb)
hdr->hop_limit--;
- __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
- __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
net, NULL, skb, skb->dev, dst->dev,
ip6_forward_finish);
@@ -1291,7 +1294,7 @@ static int __ip6_append_data(struct sock *sk,
const struct sockcm_cookie *sockc)
{
struct sk_buff *skb, *skb_prev = NULL;
- unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
+ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
int exthdrlen = 0;
int dst_exthdrlen = 0;
int hh_len;
@@ -1327,6 +1330,12 @@ static int __ip6_append_data(struct sock *sk,
sizeof(struct frag_hdr) : 0) +
rt->rt6i_nfheader_len;
+ /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
+ * the first fragment
+ */
+ if (headersize + transhdrlen > mtu)
+ goto emsgsize;
+
if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
(sk->sk_protocol == IPPROTO_UDP ||
sk->sk_protocol == IPPROTO_RAW)) {
@@ -1342,9 +1351,8 @@ static int __ip6_append_data(struct sock *sk,
if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
- ipv6_local_error(sk, EMSGSIZE, fl6,
- mtu - headersize +
- sizeof(struct ipv6hdr));
+ pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
+ ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
return -EMSGSIZE;
}
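Two things happen in the __ip6_append_data hunks above: the RFC 7112 check rejects a packet whose header chain alone exceeds the MTU, and the emsgsize path clamps the reported MTU with max_t(int, ..., 0) so a header size larger than the MTU cannot wrap into a huge unsigned value. A small standalone sketch of why that clamp matters, with illustrative names:

#include <stdio.h>

unsigned int reported_pmtu(unsigned int mtu, unsigned int headersize,
			   unsigned int ipv6hdr_len)
{
	int pmtu = (int)(mtu - headersize + ipv6hdr_len);

	if (pmtu < 0)		/* the max_t(int, ..., 0) clamp in the patch */
		pmtu = 0;
	return (unsigned int)pmtu;
}

int main(void)
{
	/* header chain larger than the MTU: without the clamp the caller
	 * would see a value near 4 billion instead of 0 */
	printf("%u\n", reported_pmtu(1280, 1400, 40));
	return 0;
}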
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 131e6aa954bc..417af5ea2509 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -298,13 +298,16 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
struct net_device *dev;
struct ip6_tnl *t;
char name[IFNAMSIZ];
- int err = -ENOMEM;
+ int err = -E2BIG;
- if (p->name[0])
+ if (p->name[0]) {
+ if (!dev_valid_name(p->name))
+ goto failed;
strlcpy(name, p->name, IFNAMSIZ);
- else
+ } else {
sprintf(name, "ip6tnl%%d");
-
+ }
+ err = -ENOMEM;
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ip6_tnl_dev_setup);
if (!dev)
@@ -1097,6 +1100,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
if (!dst) {
route_lookup:
+ /* add dsfield to flowlabel for route lookup */
+ fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
dst = ip6_route_output(net, NULL, fl6);
if (dst->error)
@@ -1127,8 +1133,13 @@ route_lookup:
max_headroom += 8;
mtu -= 8;
}
- if (mtu < IPV6_MIN_MTU)
- mtu = IPV6_MIN_MTU;
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+ } else if (mtu < 576) {
+ mtu = 576;
+ }
+
if (skb_dst(skb) && !t->parms.collect_md)
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 912333586de6..beae93fd66d5 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -212,10 +212,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
char name[IFNAMSIZ];
int err;
- if (p->name[0])
+ if (p->name[0]) {
+ if (!dev_valid_name(p->name))
+ goto failed;
strlcpy(name, p->name, IFNAMSIZ);
- else
+ } else {
sprintf(name, "ip6_vti%%d");
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
if (!dev)
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 493a32f6a5f2..c66b9a87e995 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -1343,10 +1343,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- lock_sock(sk);
- err = nf_getsockopt(sk, PF_INET6, optname, optval,
- &len);
- release_sock(sk);
+ err = nf_getsockopt(sk, PF_INET6, optname, optval, &len);
if (err >= 0)
err = put_user(len, optlen);
}
@@ -1385,10 +1382,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- lock_sock(sk);
- err = compat_nf_getsockopt(sk, PF_INET6,
- optname, optval, &len);
- release_sock(sk);
+ err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len);
if (err >= 0)
err = put_user(len, optlen);
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d8e671457d10..3fe80e104b58 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1516,7 +1516,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
*(opt++) = (rd_len >> 3);
opt += 6;
- memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
+ skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
+ rd_len - 8);
}
void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
@@ -1723,6 +1724,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
case NETDEV_CHANGEADDR:
neigh_changeaddr(&nd_tbl, dev);
fib6_run_gc(0, net, false);
+ /* fallthrough */
+ case NETDEV_UP:
idev = in6_dev_get(dev);
if (!idev)
break;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 55aacea24396..e26becc9a43d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -376,6 +376,10 @@ ip6t_do_table(struct sk_buff *skb,
}
if (table_base + v != ip6t_next_entry(e) &&
!(e->ipv6.flags & IP6T_F_GOTO)) {
+ if (unlikely(stackidx >= private->stacksize)) {
+ verdict = NF_DROP;
+ break;
+ }
jumpstack[stackidx++] = e;
}
@@ -566,7 +570,8 @@ static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
- unsigned int size)
+ unsigned int size,
+ struct xt_percpu_counter_alloc_state *alloc_state)
{
struct xt_entry_target *t;
struct xt_target *target;
@@ -574,12 +579,9 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
unsigned int j;
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
- unsigned long pcnt;
- pcnt = xt_percpu_counter_alloc();
- if (IS_ERR_VALUE(pcnt))
+ if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
return -ENOMEM;
- e->counters.pcnt = pcnt;
j = 0;
mtpar.net = net;
@@ -616,7 +618,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
cleanup_match(ematch, net);
}
- xt_percpu_counter_free(e->counters.pcnt);
+ xt_percpu_counter_free(&e->counters);
return ret;
}
@@ -703,8 +705,7 @@ static void cleanup_entry(struct ip6t_entry *e, struct net *net)
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
-
- xt_percpu_counter_free(e->counters.pcnt);
+ xt_percpu_counter_free(&e->counters);
}
/* Checks and translates the user-supplied table segment (held in
@@ -713,6 +714,7 @@ static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
const struct ip6t_replace *repl)
{
+ struct xt_percpu_counter_alloc_state alloc_state = { 0 };
struct ip6t_entry *iter;
unsigned int *offsets;
unsigned int i;
@@ -772,7 +774,8 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
/* Finally, each sanity check must pass */
i = 0;
xt_entry_foreach(iter, entry0, newinfo->size) {
- ret = find_check_entry(iter, net, repl->name, repl->size);
+ ret = find_check_entry(iter, net, repl->name, repl->size,
+ &alloc_state);
if (ret != 0)
break;
++i;
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index e0be97e636a4..322c7657165b 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
target, maniptype))
return false;
+
+ /* must reload, offset might have changed */
+ ipv6h = (void *)skb->data + iphdroff;
+
manip_addr:
if (maniptype == NF_NAT_MANIP_SRC)
ipv6h->saddr = target->src.u3.in6;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6e8bacb0b458..f6ac472acd0f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -856,6 +856,9 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
struct fib6_node *fn;
struct rt6_info *rt;
+ if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
+ flags &= ~RT6_LOOKUP_F_IFACE;
+
read_lock_bh(&table->tb6_lock);
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
@@ -1651,6 +1654,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
}
rt->dst.flags |= DST_HOST;
+ rt->dst.input = ip6_input;
rt->dst.output = ip6_output;
atomic_set(&rt->dst.__refcnt, 1);
rt->rt6i_gateway = fl6->daddr;
@@ -2807,6 +2811,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
+ [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
[RTA_OIF] = { .type = NLA_U32 },
[RTA_IIF] = { .type = NLA_U32 },
[RTA_PRIORITY] = { .type = NLA_U32 },
@@ -2816,6 +2821,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_ENCAP_TYPE] = { .type = NLA_U16 },
[RTA_ENCAP] = { .type = NLA_NESTED },
[RTA_EXPIRES] = { .type = NLA_U32 },
+ [RTA_TABLE] = { .type = NLA_U32 },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index db6d437002a6..dcb292134c21 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -176,7 +176,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel *t = netdev_priv(dev);
- if (t->dev == sitn->fb_tunnel_dev) {
+ if (dev == sitn->fb_tunnel_dev) {
ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
t->ip6rd.relay_prefix = 0;
t->ip6rd.prefixlen = 16;
@@ -244,11 +244,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
if (!create)
goto failed;
- if (parms->name[0])
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ goto failed;
strlcpy(name, parms->name, IFNAMSIZ);
- else
+ } else {
strcpy(name, "sit%d");
-
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ipip6_tunnel_setup);
if (!dev)
@@ -657,6 +659,7 @@ static int ipip6_rcv(struct sk_buff *skb)
if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6),
!net_eq(tunnel->net, dev_net(tunnel->dev))))
goto out;
+ iph = ip_hdr(skb);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 91cbbf1c3f82..c2dfc32eb9f2 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2418,9 +2418,11 @@ static int afiucv_iucv_init(void)
af_iucv_dev->driver = &af_iucv_driver;
err = device_register(af_iucv_dev);
if (err)
- goto out_driver;
+ goto out_iucv_dev;
return 0;
+out_iucv_dev:
+ put_device(af_iucv_dev);
out_driver:
driver_unregister(&af_iucv_driver);
out_iucv:
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 179cd9b1b1f4..cc306defcc19 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1375,24 +1375,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
struct list_head *head;
int index = 0;
struct strp_callbacks cb;
- int err;
+ int err = 0;
csk = csock->sk;
if (!csk)
return -EINVAL;
+ lock_sock(csk);
+
/* Only allow TCP sockets to be attached for now */
if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
- csk->sk_protocol != IPPROTO_TCP)
- return -EOPNOTSUPP;
+ csk->sk_protocol != IPPROTO_TCP) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
/* Don't allow listeners or closed sockets */
- if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
- return -EOPNOTSUPP;
+ if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
- if (!psock)
- return -ENOMEM;
+ if (!psock) {
+ err = -ENOMEM;
+ goto out;
+ }
psock->mux = mux;
psock->sk = csk;
@@ -1406,7 +1414,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
err = strp_init(&psock->strp, csk, &cb);
if (err) {
kmem_cache_free(kcm_psockp, psock);
- return err;
+ goto out;
}
write_lock_bh(&csk->sk_callback_lock);
@@ -1416,9 +1424,11 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
*/
if (csk->sk_user_data) {
write_unlock_bh(&csk->sk_callback_lock);
+ strp_stop(&psock->strp);
strp_done(&psock->strp);
kmem_cache_free(kcm_psockp, psock);
- return -EALREADY;
+ err = -EALREADY;
+ goto out;
}
psock->save_data_ready = csk->sk_data_ready;
@@ -1454,7 +1464,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
/* Schedule RX work in case there are already bytes queued */
strp_check_rcv(&psock->strp);
- return 0;
+out:
+ release_sock(csk);
+
+ return err;
}
static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
@@ -1506,6 +1519,7 @@ static void kcm_unattach(struct kcm_psock *psock)
if (WARN_ON(psock->rx_kcm)) {
write_unlock_bh(&csk->sk_callback_lock);
+ release_sock(csk);
return;
}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 6482b001f19a..15150b412930 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3305,7 +3305,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
p += pol->sadb_x_policy_len*8;
sec_ctx = (struct sadb_x_sec_ctx *)p;
if (len < pol->sadb_x_policy_len*8 +
- sec_ctx->sadb_x_sec_len) {
+ sec_ctx->sadb_x_sec_len*8) {
*dir = -EINVAL;
goto out;
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index cfc4dd8997e5..ead98e8e0b1f 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1612,9 +1612,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
encap = cfg->encap;
/* Quick sanity checks */
+ err = -EPROTONOSUPPORT;
+ if (sk->sk_type != SOCK_DGRAM) {
+ pr_debug("tunl %hu: fd %d wrong socket type\n",
+ tunnel_id, fd);
+ goto err;
+ }
switch (encap) {
case L2TP_ENCAPTYPE_UDP:
- err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_UDP) {
pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
@@ -1622,7 +1627,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
}
break;
case L2TP_ENCAPTYPE_IP:
- err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_L2TP) {
pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 163f1fa53917..9b214f313cc0 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -590,6 +590,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OL2TP)
goto end;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index db916cf51ffe..85aae8c84aeb 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -197,9 +197,19 @@ static int llc_ui_release(struct socket *sock)
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
- if (!sock_flag(sk, SOCK_ZAPPED))
+ if (!sock_flag(sk, SOCK_ZAPPED)) {
+ struct llc_sap *sap = llc->sap;
+
+ /* Hold this for release_sock(), so that llc_backlog_rcv()
+ * could still use it.
+ */
+ llc_sap_hold(sap);
llc_sap_remove_socket(llc->sap, sk);
- release_sock(sk);
+ release_sock(sk);
+ llc_sap_put(sap);
+ } else {
+ release_sock(sk);
+ }
if (llc->dev)
dev_put(llc->dev);
sock_put(sk);
@@ -309,6 +319,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
int rc = -EINVAL;
dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
+
+ lock_sock(sk);
if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
goto out;
rc = -EAFNOSUPPORT;
@@ -380,6 +392,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
out_put:
llc_sap_put(sap);
out:
+ release_sock(sk);
return rc;
}
@@ -913,6 +926,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (size > llc->dev->mtu)
size = llc->dev->mtu;
copied = size - hdrlen;
+ rc = -EINVAL;
+ if (copied < 0)
+ goto release;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
lock_sock(sk);
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index ea225bd2672c..f8d4ab8ca1a5 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1096,14 +1096,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
{
- struct llc_sock *llc = llc_sk(sk);
-
- del_timer(&llc->pf_cycle_timer.timer);
- del_timer(&llc->ack_timer.timer);
- del_timer(&llc->rej_sent_timer.timer);
- del_timer(&llc->busy_state_timer.timer);
- llc->ack_must_be_send = 0;
- llc->ack_pf = 0;
+ llc_sk_stop_all_timers(sk, false);
return 0;
}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8bc5a1bd2d45..d861b74ad068 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -951,6 +951,26 @@ out:
return sk;
}
+void llc_sk_stop_all_timers(struct sock *sk, bool sync)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ if (sync) {
+ del_timer_sync(&llc->pf_cycle_timer.timer);
+ del_timer_sync(&llc->ack_timer.timer);
+ del_timer_sync(&llc->rej_sent_timer.timer);
+ del_timer_sync(&llc->busy_state_timer.timer);
+ } else {
+ del_timer(&llc->pf_cycle_timer.timer);
+ del_timer(&llc->ack_timer.timer);
+ del_timer(&llc->rej_sent_timer.timer);
+ del_timer(&llc->busy_state_timer.timer);
+ }
+
+ llc->ack_must_be_send = 0;
+ llc->ack_pf = 0;
+}
+
/**
* llc_sk_free - Frees a LLC socket
* @sk - socket to free
@@ -963,7 +983,7 @@ void llc_sk_free(struct sock *sk)
llc->state = LLC_CONN_OUT_OF_SVC;
/* Stop all (possibly) running timers */
- llc_conn_ac_stop_all_timers(sk, NULL);
+ llc_sk_stop_all_timers(sk, true);
#ifdef DEBUG_LLC_CONN_ALLOC
printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
skb_queue_len(&llc->pdu_unack_q),
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 07001b6d36cc..d7801f6877af 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -620,10 +620,11 @@ void sta_set_rate_info_tx(struct sta_info *sta,
int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
u16 brate;
- sband = sta->local->hw.wiphy->bands[
- ieee80211_get_sdata_band(sta->sdata)];
- brate = sband->bitrates[rate->idx].bitrate;
- rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
+ sband = ieee80211_get_sband(sta->sdata);
+ if (sband) {
+ brate = sband->bitrates[rate->idx].bitrate;
+ rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
+ }
}
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
rinfo->bw = RATE_INFO_BW_40;
@@ -1218,10 +1219,11 @@ static int sta_apply_parameters(struct ieee80211_local *local,
int ret = 0;
struct ieee80211_supported_band *sband;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- enum nl80211_band band = ieee80211_get_sdata_band(sdata);
u32 mask, set;
- sband = local->hw.wiphy->bands[band];
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return -EINVAL;
mask = params->sta_flags_mask;
set = params->sta_flags_set;
@@ -1354,7 +1356,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
sband, params->supported_rates,
params->supported_rates_len,
- &sta->sta.supp_rates[band]);
+ &sta->sta.supp_rates[sband->band]);
}
if (params->ht_capa)
@@ -1370,8 +1372,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
/* returned value is only needed for rc update, but the
* rc isn't initialized here yet, so ignore it
*/
- __ieee80211_vht_handle_opmode(sdata, sta,
- params->opmode_notif, band);
+ __ieee80211_vht_handle_opmode(sdata, sta, params->opmode_notif,
+ sband->band);
}
if (params->support_p2p_ps >= 0)
@@ -2017,13 +2019,15 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
struct bss_parameters *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- enum nl80211_band band;
+ struct ieee80211_supported_band *sband;
u32 changed = 0;
if (!sdata_dereference(sdata->u.ap.beacon, sdata))
return -ENOENT;
- band = ieee80211_get_sdata_band(sdata);
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return -EINVAL;
if (params->use_cts_prot >= 0) {
sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot;
@@ -2036,7 +2040,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
}
if (!sdata->vif.bss_conf.use_short_slot &&
- band == NL80211_BAND_5GHZ) {
+ sband->band == NL80211_BAND_5GHZ) {
sdata->vif.bss_conf.use_short_slot = true;
changed |= BSS_CHANGED_ERP_SLOT;
}
@@ -2049,7 +2053,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
if (params->basic_rates) {
ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
- wiphy->bands[band],
+ wiphy->bands[sband->band],
params->basic_rates,
params->basic_rates_len,
&sdata->vif.bss_conf.basic_rates);
@@ -2337,10 +2341,17 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata;
enum nl80211_tx_power_setting txp_type = type;
bool update_txp_type = false;
+ bool has_monitor = false;
if (wdev) {
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ sdata = rtnl_dereference(local->monitor_sdata);
+ if (!sdata)
+ return -EOPNOTSUPP;
+ }
+
switch (type) {
case NL80211_TX_POWER_AUTOMATIC:
sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@ -2379,15 +2390,34 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ has_monitor = true;
+ continue;
+ }
sdata->user_power_level = local->user_power_level;
if (txp_type != sdata->vif.bss_conf.txpower_type)
update_txp_type = true;
sdata->vif.bss_conf.txpower_type = txp_type;
}
- list_for_each_entry(sdata, &local->interfaces, list)
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ continue;
ieee80211_recalc_txpower(sdata, update_txp_type);
+ }
mutex_unlock(&local->iflist_mtx);
+ if (has_monitor) {
+ sdata = rtnl_dereference(local->monitor_sdata);
+ if (sdata) {
+ sdata->user_power_level = local->user_power_level;
+ if (txp_type != sdata->vif.bss_conf.txpower_type)
+ update_txp_type = true;
+ sdata->vif.bss_conf.txpower_type = txp_type;
+
+ ieee80211_recalc_txpower(sdata, update_txp_type);
+ }
+ }
+
return 0;
}
@@ -2792,7 +2822,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
}
if (beacon->probe_resp_len) {
new_beacon->probe_resp_len = beacon->probe_resp_len;
- beacon->probe_resp = pos;
+ new_beacon->probe_resp = pos;
memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
pos += beacon->probe_resp_len;
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 09f77e4a8a79..49c8a9c9b91f 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -164,7 +164,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
sdata->vif.type == NL80211_IFTYPE_NAN ||
(sdata->vif.type == NL80211_IFTYPE_MONITOR &&
- !sdata->vif.mu_mimo_owner)))
+ !sdata->vif.mu_mimo_owner &&
+ !(changed & BSS_CHANGED_TXPOWER))))
return;
if (!check_sdata_in_driver(sdata))
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 62d13eabe17f..a5acaf1efaab 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -427,7 +427,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
cfg80211_chandef_create(&chandef, cbss->channel,
- NL80211_CHAN_WIDTH_20_NOHT);
+ NL80211_CHAN_NO_HT);
chandef.width = sdata->u.ibss.chandef.width;
break;
case NL80211_CHAN_WIDTH_80:
@@ -439,7 +439,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
default:
/* fall back to 20 MHz for unsupported modes */
cfg80211_chandef_create(&chandef, cbss->channel,
- NL80211_CHAN_WIDTH_20_NOHT);
+ NL80211_CHAN_NO_HT);
break;
}
@@ -994,7 +994,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
enum nl80211_band band = rx_status->band;
enum nl80211_bss_scan_width scan_width;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+ struct ieee80211_supported_band *sband;
bool rates_updated = false;
u32 supp_rates = 0;
@@ -1004,6 +1004,10 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
if (!ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid))
return;
+ sband = local->hw.wiphy->bands[band];
+ if (WARN_ON(!sband))
+ return;
+
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 03dbc6bd8598..7fd544d970d9 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -991,21 +991,6 @@ sdata_assert_lock(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&sdata->wdev.mtx);
}
-static inline enum nl80211_band
-ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
-{
- enum nl80211_band band = NL80211_BAND_2GHZ;
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!WARN_ON(!chanctx_conf))
- band = chanctx_conf->def.chan->band;
- rcu_read_unlock();
-
- return band;
-}
-
static inline int
ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef)
{
@@ -1410,6 +1395,27 @@ IEEE80211_WDEV_TO_SUB_IF(struct wireless_dev *wdev)
return container_of(wdev, struct ieee80211_sub_if_data, wdev);
}
+static inline struct ieee80211_supported_band *
+ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum nl80211_band band;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ band = chanctx_conf->def.chan->band;
+ rcu_read_unlock();
+
+ return local->hw.wiphy->bands[band];
+}
+
/* this struct represents 802.11n's RA/TID combination */
struct ieee80211_ra_tid {
u8 ra[ETH_ALEN];
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index a7aa54f45e19..fa7d757fef95 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1520,7 +1520,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
break;
case NL80211_IFTYPE_UNSPECIFIED:
case NUM_NL80211_IFTYPES:
- BUG();
+ WARN_ON(1);
break;
}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index b4b3fe078868..b2a27263d6ff 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -63,6 +63,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u32 basic_rates = 0;
struct cfg80211_chan_def sta_chan_def;
+ struct ieee80211_supported_band *sband;
/*
* As support for each feature is added, check for matching
@@ -83,7 +84,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
(ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
return false;
- ieee80211_sta_get_rates(sdata, ie, ieee80211_get_sdata_band(sdata),
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return false;
+
+ ieee80211_sta_get_rates(sdata, ie, sband->band,
&basic_rates);
if (sdata->vif.bss_conf.basic_rates != basic_rates)
@@ -399,12 +404,13 @@ static int mesh_add_ds_params_ie(struct ieee80211_sub_if_data *sdata,
int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
- struct ieee80211_local *local = sdata->local;
- enum nl80211_band band = ieee80211_get_sdata_band(sdata);
struct ieee80211_supported_band *sband;
u8 *pos;
- sband = local->hw.wiphy->bands[band];
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return -EINVAL;
+
if (!sband->ht_cap.ht_supported ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
@@ -462,12 +468,13 @@ int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
- struct ieee80211_local *local = sdata->local;
- enum nl80211_band band = ieee80211_get_sdata_band(sdata);
struct ieee80211_supported_band *sband;
u8 *pos;
- sband = local->hw.wiphy->bands[band];
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return -EINVAL;
+
if (!sband->vht_cap.vht_supported ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
@@ -916,12 +923,16 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
struct cfg80211_csa_settings params;
struct ieee80211_csa_ie csa_ie;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- enum nl80211_band band = ieee80211_get_sdata_band(sdata);
+ struct ieee80211_supported_band *sband;
int err;
u32 sta_flags;
sdata_assert_lock(sdata);
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return false;
+
sta_flags = IEEE80211_STA_DISABLE_VHT;
switch (sdata->vif.bss_conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -935,7 +946,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
memset(&params, 0, sizeof(params));
memset(&csa_ie, 0, sizeof(csa_ie));
- err = ieee80211_parse_ch_switch_ie(sdata, elems, band,
+ err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band,
sta_flags, sdata->vif.addr,
&csa_ie);
if (err < 0)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index fcba70e57073..2d3c4695e75b 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -93,19 +93,23 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
- enum nl80211_band band = ieee80211_get_sdata_band(sdata);
- struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+ struct ieee80211_supported_band *sband;
struct sta_info *sta;
u32 erp_rates = 0, changed = 0;
int i;
bool short_slot = false;
- if (band == NL80211_BAND_5GHZ) {
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return changed;
+
+ if (sband->band == NL80211_BAND_5GHZ) {
/* (IEEE 802.11-2012 19.4.5) */
short_slot = true;
goto out;
- } else if (band != NL80211_BAND_2GHZ)
+ } else if (sband->band != NL80211_BAND_2GHZ) {
goto out;
+ }
for (i = 0; i < sband->n_bitrates; i++)
if (sband->bitrates[i].flags & IEEE80211_RATE_ERP_G)
@@ -121,7 +125,7 @@ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
continue;
short_slot = false;
- if (erp_rates & sta->sta.supp_rates[band])
+ if (erp_rates & sta->sta.supp_rates[sband->band])
short_slot = true;
else
break;
@@ -247,7 +251,15 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.self_prot.action_code = action;
if (action != WLAN_SP_MESH_PEERING_CLOSE) {
- enum nl80211_band band = ieee80211_get_sdata_band(sdata);
+ struct ieee80211_supported_band *sband;
+ enum nl80211_band band;
+
+ sband = ieee80211_get_sband(sdata);
+ if (!sband) {
+ err = -EINVAL;
+ goto free;
+ }
+ band = sband->band;
/* capability info */
pos = skb_put(skb, 2);
@@ -393,13 +405,16 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems, bool insert)
{
struct ieee80211_local *local = sdata->local;
- enum nl80211_band band = ieee80211_get_sdata_band(sdata);
struct ieee80211_supported_band *sband;
u32 rates, basic_rates = 0, changed = 0;
enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
- sband = local->hw.wiphy->bands[band];
- rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return;
+
+ rates = ieee80211_sta_get_rates(sdata, elems, sband->band,
+ &basic_rates);
spin_lock_bh(&sta->mesh->plink_lock);
sta->rx_stats.last_rx = jiffies;
@@ -410,9 +425,9 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
goto out;
sta->mesh->processed_beacon = true;
- if (sta->sta.supp_rates[band] != rates)
+ if (sta->sta.supp_rates[sband->band] != rates)
changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
- sta->sta.supp_rates[band] = rates;
+ sta->sta.supp_rates[sband->band] = rates;
if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems->ht_cap_elem, sta))
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1118c61f835d..e6f42d12222e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1851,11 +1851,16 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
u16 capab, bool erp_valid, u8 erp)
{
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+ struct ieee80211_supported_band *sband;
u32 changed = 0;
bool use_protection;
bool use_short_preamble;
bool use_short_slot;
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return changed;
+
if (erp_valid) {
use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0;
use_short_preamble = (erp & WLAN_ERP_BARKER_PREAMBLE) == 0;
@@ -1865,7 +1870,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
}
use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
- if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_5GHZ)
+ if (sband->band == NL80211_BAND_5GHZ)
use_short_slot = true;
if (use_protection != bss_conf->use_cts_prot) {
@@ -2994,7 +2999,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
goto out;
}
- sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
+ sband = ieee80211_get_sband(sdata);
+ if (!sband) {
+ mutex_unlock(&sdata->local->sta_mtx);
+ ret = false;
+ goto out;
+ }
/* Set up internal HT/VHT capabilities */
if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
@@ -4322,6 +4332,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
return -EINVAL;
+ /* If a reconfig is happening, bail out */
+ if (local->in_reconfig)
+ return -EBUSY;
+
if (assoc) {
rcu_read_lock();
have_sta = sta_info_get(sdata, cbss->bssid);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 206698bc93f4..e6096dfd0210 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -173,9 +173,11 @@ ieee80211_rate_control_ops_get(const char *name)
/* try default if specific alg requested but not found */
ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo);
- /* try built-in one if specific alg requested but not found */
- if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT))
+ /* Note: check for > 0 is intentional to avoid clang warning */
+ if (!ops && (strlen(CONFIG_MAC80211_RC_DEFAULT) > 0))
+ /* try built-in one if specific alg requested but not found */
ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT);
+
kernel_param_unlock(THIS_MODULE);
return ops;
@@ -875,7 +877,9 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
struct ieee80211_sta_rates *old;
struct ieee80211_supported_band *sband;
- sband = hw->wiphy->bands[ieee80211_get_sdata_band(sta->sdata)];
+ sband = ieee80211_get_sband(sta->sdata);
+ if (!sband)
+ return -EINVAL;
rate_control_apply_mask_ratetbl(sta, sband, rates);
/*
* mac80211 guarantees that this function will not be called
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index ca0daeaff370..278e55020e47 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3611,6 +3611,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
}
return true;
case NL80211_IFTYPE_MESH_POINT:
+ if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
+ return false;
if (multicast)
return true;
return ether_addr_equal(sdata->vif.addr, hdr->addr1);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 348700b424ea..1ecf3d07d1f5 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -395,10 +395,15 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->sta.smps_mode = IEEE80211_SMPS_OFF;
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
- struct ieee80211_supported_band *sband =
- hw->wiphy->bands[ieee80211_get_sdata_band(sdata)];
- u8 smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
- IEEE80211_HT_CAP_SM_PS_SHIFT;
+ struct ieee80211_supported_band *sband;
+ u8 smps;
+
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ goto free_txq;
+
+ smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
/*
* Assume that hostapd advertises our caps in the beacon and
* this is the known_smps_mode for a station that just associated
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index ad37b4e58c2f..72fe9bc7a1f9 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -200,6 +200,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
}
if (ieee80211_is_action(mgmt->frame_control) &&
+ !ieee80211_has_protected(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_HT &&
mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS &&
ieee80211_sdata_running(sdata)) {
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index afca7d103684..f20dcf1b1830 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -47,8 +47,7 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
!ifmgd->tdls_wider_bw_prohibited;
- enum nl80211_band band = ieee80211_get_sdata_band(sdata);
- struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+ struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata);
bool vht = sband && sband->vht_cap.vht_supported;
u8 *pos = (void *)skb_put(skb, 10);
@@ -180,11 +179,14 @@ static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb)
static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata,
u16 status_code)
{
+ struct ieee80211_supported_band *sband;
+
/* The capability will be 0 when sending a failure code */
if (status_code != 0)
return 0;
- if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_2GHZ) {
+ sband = ieee80211_get_sband(sdata);
+ if (sband && sband->band == NL80211_BAND_2GHZ) {
return WLAN_CAPABILITY_SHORT_SLOT_TIME |
WLAN_CAPABILITY_SHORT_PREAMBLE;
}
@@ -358,17 +360,20 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
u8 action_code, bool initiator,
const u8 *extra_ies, size_t extra_ies_len)
{
- enum nl80211_band band = ieee80211_get_sdata_band(sdata);
- struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
struct sta_info *sta = NULL;
size_t offset = 0, noffset;
u8 *pos;
- ieee80211_add_srates_ie(sdata, skb, false, band);
- ieee80211_add_ext_srates_ie(sdata, skb, false, band);
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return;
+
+ ieee80211_add_srates_ie(sdata, skb, false, sband->band);
+ ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band);
ieee80211_tdls_add_supp_channels(sdata, skb);
/* add any custom IEs that go before Extended Capabilities */
@@ -439,7 +444,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
* the same on all bands. The specification limits the setup to a
* single HT-cap, so use the current band for now.
*/
- sband = local->hw.wiphy->bands[band];
memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
@@ -545,9 +549,13 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
size_t offset = 0, noffset;
struct sta_info *sta, *ap_sta;
- enum nl80211_band band = ieee80211_get_sdata_band(sdata);
+ struct ieee80211_supported_band *sband;
u8 *pos;
+ sband = ieee80211_get_sband(sdata);
+ if (!sband)
+ return;
+
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, peer);
@@ -612,7 +620,8 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
/* only include VHT-operation if not on the 2.4GHz band */
- if (band != NL80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+ if (sband->band != NL80211_BAND_2GHZ &&
+ sta->sta.vht_cap.vht_supported) {
/*
* if both peers support WIDER_BW, we can expand the chandef to
* a wider compatible one, up to 80MHz
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1ffd1e145c13..84582998f65f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -4182,7 +4182,10 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
return bcn;
shift = ieee80211_vif_get_shift(vif);
- sband = hw->wiphy->bands[ieee80211_get_sdata_band(vif_to_sdata(vif))];
+ sband = ieee80211_get_sband(vif_to_sdata(vif));
+ if (!sband)
+ return bcn;
+
ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false);
return bcn;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 545c79a42a77..a2756096b94a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1590,14 +1590,14 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
size_t num_rates;
u32 supp_rates, rate_flags;
int i, j, shift;
+
sband = sdata->local->hw.wiphy->bands[band];
+ if (WARN_ON(!sband))
+ return 1;
rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
shift = ieee80211_vif_get_shift(&sdata->vif);
- if (WARN_ON(!sband))
- return 1;
-
num_rates = sband->n_bitrates;
supp_rates = 0;
for (i = 0; i < elems->supp_rates_len +
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index c5a5a6959c1b..ffab94d61e1d 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -7,6 +7,7 @@
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
+#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/dst.h>
@@ -756,6 +757,22 @@ errout:
return err;
}
+static bool mpls_label_ok(struct net *net, unsigned int *index)
+{
+ bool is_ok = true;
+
+ /* Reserved labels may not be set */
+ if (*index < MPLS_LABEL_FIRST_UNRESERVED)
+ is_ok = false;
+
+ /* The full 20 bit range may not be supported. */
+ if (is_ok && *index >= net->mpls.platform_labels)
+ is_ok = false;
+
+ *index = array_index_nospec(*index, net->mpls.platform_labels);
+ return is_ok;
+}
+
static int mpls_route_add(struct mpls_route_config *cfg)
{
struct mpls_route __rcu **platform_label;
@@ -774,12 +791,7 @@ static int mpls_route_add(struct mpls_route_config *cfg)
index = find_free_label(net);
}
- /* Reserved labels may not be set */
- if (index < MPLS_LABEL_FIRST_UNRESERVED)
- goto errout;
-
- /* The full 20 bit range may not be supported. */
- if (index >= net->mpls.platform_labels)
+ if (!mpls_label_ok(net, &index))
goto errout;
/* Append makes no sense with mpls */
@@ -840,12 +852,7 @@ static int mpls_route_del(struct mpls_route_config *cfg)
index = cfg->rc_label;
- /* Reserved labels may not be removed */
- if (index < MPLS_LABEL_FIRST_UNRESERVED)
- goto errout;
-
- /* The full 20 bit range may not be supported */
- if (index >= net->mpls.platform_labels)
+ if (!mpls_label_ok(net, &index))
goto errout;
mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
@@ -1279,10 +1286,9 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
&cfg->rc_label))
goto errout;
- /* Reserved labels may not be set */
- if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
+ if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
+ &cfg->rc_label))
goto errout;
-
break;
}
case RTA_VIA:
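mpls_label_ok() above folds the two range checks (reserved labels, platform table size) into one helper and then clamps the index with array_index_nospec(), so a mispredicted bounds check cannot be used to index past platform_labels speculatively. A userspace sketch of the validate-then-clamp shape; a plain clamp stands in for the kernel's speculation barrier and the names are illustrative:

#include <stdbool.h>
#include <stddef.h>

#define FIRST_UNRESERVED 16		/* MPLS_LABEL_FIRST_UNRESERVED */

bool label_ok(size_t *index, size_t platform_labels)
{
	bool ok = true;

	if (*index < FIRST_UNRESERVED)		/* reserved labels may not be set */
		ok = false;
	if (ok && *index >= platform_labels)	/* 20-bit space may be truncated */
		ok = false;

	/* clamp unconditionally, as array_index_nospec() does, so later
	 * table lookups stay in bounds even under misspeculation */
	if (*index >= platform_labels)
		*index = 0;
	return ok;
}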
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 2155c2498aed..c5f2350a2b50 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2393,11 +2393,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
sizeof(cfg.mcast_ifn));
cfg.syncid = dm->syncid;
- rtnl_lock();
- mutex_lock(&ipvs->sync_mutex);
ret = start_sync_thread(ipvs, &cfg, dm->state);
- mutex_unlock(&ipvs->sync_mutex);
- rtnl_unlock();
} else {
mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs, dm->state);
@@ -3092,6 +3088,17 @@ nla_put_failure:
return skb->len;
}
+static bool ip_vs_is_af_valid(int af)
+{
+ if (af == AF_INET)
+ return true;
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6 && ipv6_mod_enabled())
+ return true;
+#endif
+ return false;
+}
+
static int ip_vs_genl_parse_service(struct netns_ipvs *ipvs,
struct ip_vs_service_user_kern *usvc,
struct nlattr *nla, int full_entry,
@@ -3118,11 +3125,7 @@ static int ip_vs_genl_parse_service(struct netns_ipvs *ipvs,
memset(usvc, 0, sizeof(*usvc));
usvc->af = nla_get_u16(nla_af);
-#ifdef CONFIG_IP_VS_IPV6
- if (usvc->af != AF_INET && usvc->af != AF_INET6)
-#else
- if (usvc->af != AF_INET)
-#endif
+ if (!ip_vs_is_af_valid(usvc->af))
return -EAFNOSUPPORT;
if (nla_fwmark) {
@@ -3488,12 +3491,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
if (ipvs->mixed_address_family_dests > 0)
return -EINVAL;
- rtnl_lock();
- mutex_lock(&ipvs->sync_mutex);
ret = start_sync_thread(ipvs, &c,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
- mutex_unlock(&ipvs->sync_mutex);
- rtnl_unlock();
return ret;
}
@@ -3624,6 +3623,11 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
if (udest.af == 0)
udest.af = svc->af;
+ if (!ip_vs_is_af_valid(udest.af)) {
+ ret = -EAFNOSUPPORT;
+ goto out;
+ }
+
if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) {
/* The synchronization protocol is incompatible
* with mixed family services
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 9350530c16c1..5fbf4b232592 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -48,6 +48,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
@@ -1359,15 +1360,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
/*
* Specifiy default interface for outgoing multicasts
*/
-static int set_mcast_if(struct sock *sk, char *ifname)
+static int set_mcast_if(struct sock *sk, struct net_device *dev)
{
- struct net_device *dev;
struct inet_sock *inet = inet_sk(sk);
- struct net *net = sock_net(sk);
-
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -1395,19 +1390,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
* in the in_addr structure passed in as a parameter.
*/
static int
-join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
+join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
{
- struct net *net = sock_net(sk);
struct ip_mreqn mreq;
- struct net_device *dev;
int ret;
memset(&mreq, 0, sizeof(mreq));
memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -1422,15 +1412,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
#ifdef CONFIG_IP_VS_IPV6
static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
- char *ifname)
+ struct net_device *dev)
{
- struct net *net = sock_net(sk);
- struct net_device *dev;
int ret;
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -1442,24 +1427,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
}
#endif
-static int bind_mcastif_addr(struct socket *sock, char *ifname)
+static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
{
- struct net *net = sock_net(sock->sk);
- struct net_device *dev;
__be32 addr;
struct sockaddr_in sin;
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
-
addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
if (!addr)
pr_err("You probably need to specify IP address on "
"multicast interface.\n");
IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
- ifname, &addr);
+ dev->name, &addr);
/* Now bind the socket with the address of multicast interface */
sin.sin_family = AF_INET;
@@ -1492,7 +1471,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
/*
* Set up sending multicast socket over UDP
*/
-static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+static int make_send_sock(struct netns_ipvs *ipvs, int id,
+ struct net_device *dev, struct socket **sock_ret)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
@@ -1504,9 +1484,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
- return ERR_PTR(result);
+ goto error;
}
- result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
+ *sock_ret = sock;
+ result = set_mcast_if(sock->sk, dev);
if (result < 0) {
pr_err("Error setting outbound mcast interface\n");
goto error;
@@ -1521,7 +1502,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
set_sock_size(sock->sk, 1, result);
if (AF_INET == ipvs->mcfg.mcast_af)
- result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
+ result = bind_mcastif_addr(sock, dev);
else
result = 0;
if (result < 0) {
@@ -1537,19 +1518,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
goto error;
}
- return sock;
+ return 0;
error:
- sock_release(sock);
- return ERR_PTR(result);
+ return result;
}
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
- int ifindex)
+static int make_receive_sock(struct netns_ipvs *ipvs, int id,
+ struct net_device *dev, struct socket **sock_ret)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
@@ -1561,8 +1541,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
- return ERR_PTR(result);
+ goto error;
}
+ *sock_ret = sock;
/* it is equivalent to the REUSEADDR option in user-space */
sock->sk->sk_reuse = SK_CAN_REUSE;
result = sysctl_sync_sock_size(ipvs);
@@ -1570,7 +1551,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
set_sock_size(sock->sk, 0, result);
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
- sock->sk->sk_bound_dev_if = ifindex;
+ sock->sk->sk_bound_dev_if = dev->ifindex;
result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
@@ -1581,21 +1562,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
#ifdef CONFIG_IP_VS_IPV6
if (ipvs->bcfg.mcast_af == AF_INET6)
result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
- ipvs->bcfg.mcast_ifn);
+ dev);
else
#endif
result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
- ipvs->bcfg.mcast_ifn);
+ dev);
if (result < 0) {
pr_err("Error joining to the multicast group\n");
goto error;
}
- return sock;
+ return 0;
error:
- sock_release(sock);
- return ERR_PTR(result);
+ return result;
}
@@ -1780,13 +1760,12 @@ static int sync_thread_backup(void *data)
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
int state)
{
- struct ip_vs_sync_thread_data *tinfo;
+ struct ip_vs_sync_thread_data *tinfo = NULL;
struct task_struct **array = NULL, *task;
- struct socket *sock;
struct net_device *dev;
char *name;
int (*threadfn)(void *data);
- int id, count, hlen;
+ int id = 0, count, hlen;
int result = -ENOMEM;
u16 mtu, min_mtu;
@@ -1794,6 +1773,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
sizeof(struct ip_vs_sync_conn_v0));
+ /* Do not hold one mutex and then block on another */
+ for (;;) {
+ rtnl_lock();
+ if (mutex_trylock(&ipvs->sync_mutex))
+ break;
+ rtnl_unlock();
+ mutex_lock(&ipvs->sync_mutex);
+ if (rtnl_trylock())
+ break;
+ mutex_unlock(&ipvs->sync_mutex);
+ }
+
if (!ipvs->sync_state) {
count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
ipvs->threads_mask = count - 1;
@@ -1812,7 +1803,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
if (!dev) {
pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
- return -ENODEV;
+ result = -ENODEV;
+ goto out_early;
}
hlen = (AF_INET6 == c->mcast_af) ?
sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
@@ -1829,26 +1821,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
c->sync_maxlen = mtu - hlen;
if (state == IP_VS_STATE_MASTER) {
+ result = -EEXIST;
if (ipvs->ms)
- return -EEXIST;
+ goto out_early;
ipvs->mcfg = *c;
name = "ipvs-m:%d:%d";
threadfn = sync_thread_master;
} else if (state == IP_VS_STATE_BACKUP) {
+ result = -EEXIST;
if (ipvs->backup_threads)
- return -EEXIST;
+ goto out_early;
ipvs->bcfg = *c;
name = "ipvs-b:%d:%d";
threadfn = sync_thread_backup;
} else {
- return -EINVAL;
+ result = -EINVAL;
+ goto out_early;
}
if (state == IP_VS_STATE_MASTER) {
struct ipvs_master_sync_state *ms;
+ result = -ENOMEM;
ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL);
if (!ipvs->ms)
goto out;
@@ -1864,39 +1860,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
} else {
array = kzalloc(count * sizeof(struct task_struct *),
GFP_KERNEL);
+ result = -ENOMEM;
if (!array)
goto out;
}
- tinfo = NULL;
for (id = 0; id < count; id++) {
- if (state == IP_VS_STATE_MASTER)
- sock = make_send_sock(ipvs, id);
- else
- sock = make_receive_sock(ipvs, id, dev->ifindex);
- if (IS_ERR(sock)) {
- result = PTR_ERR(sock);
- goto outtinfo;
- }
+ result = -ENOMEM;
tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
if (!tinfo)
- goto outsocket;
+ goto out;
tinfo->ipvs = ipvs;
- tinfo->sock = sock;
+ tinfo->sock = NULL;
if (state == IP_VS_STATE_BACKUP) {
tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
GFP_KERNEL);
if (!tinfo->buf)
- goto outtinfo;
+ goto out;
} else {
tinfo->buf = NULL;
}
tinfo->id = id;
+ if (state == IP_VS_STATE_MASTER)
+ result = make_send_sock(ipvs, id, dev, &tinfo->sock);
+ else
+ result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
+ if (result < 0)
+ goto out;
task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
if (IS_ERR(task)) {
result = PTR_ERR(task);
- goto outtinfo;
+ goto out;
}
tinfo = NULL;
if (state == IP_VS_STATE_MASTER)
@@ -1913,20 +1908,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
ipvs->sync_state |= state;
spin_unlock_bh(&ipvs->sync_buff_lock);
+ mutex_unlock(&ipvs->sync_mutex);
+ rtnl_unlock();
+
/* increase the module use count */
ip_vs_use_count_inc();
return 0;
-outsocket:
- sock_release(sock);
-
-outtinfo:
- if (tinfo) {
- sock_release(tinfo->sock);
- kfree(tinfo->buf);
- kfree(tinfo);
- }
+out:
+ /* We do not need the RTNL lock anymore; release it here so that
+ * sock_release below and in the kthreads can use rtnl_lock
+ * to leave the mcast group.
+ */
+ rtnl_unlock();
count = id;
while (count-- > 0) {
if (state == IP_VS_STATE_MASTER)
@@ -1934,13 +1929,23 @@ outtinfo:
else
kthread_stop(array[count]);
}
- kfree(array);
-
-out:
if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
kfree(ipvs->ms);
ipvs->ms = NULL;
}
+ mutex_unlock(&ipvs->sync_mutex);
+ if (tinfo) {
+ if (tinfo->sock)
+ sock_release(tinfo->sock);
+ kfree(tinfo->buf);
+ kfree(tinfo);
+ }
+ kfree(array);
+ return result;
+
+out_early:
+ mutex_unlock(&ipvs->sync_mutex);
+ rtnl_unlock();
return result;
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 750b8bf13e60..2039fd7daf4e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1542,7 +1542,6 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct hlist_nulls_node *n;
- int cpu;
spinlock_t *lockp;
for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
@@ -1564,24 +1563,40 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
cond_resched();
}
+ return NULL;
+found:
+ atomic_inc(&ct->ct_general.use);
+ spin_unlock(lockp);
+ local_bh_enable();
+ return ct;
+}
+
+static void
+__nf_ct_unconfirmed_destroy(struct net *net)
+{
+ int cpu;
+
for_each_possible_cpu(cpu) {
- struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+ struct ct_pcpu *pcpu;
+
+ pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
spin_lock_bh(&pcpu->lock);
hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
+ struct nf_conn *ct;
+
ct = nf_ct_tuplehash_to_ctrack(h);
- if (iter(ct, data))
- set_bit(IPS_DYING_BIT, &ct->status);
+
+ /* we cannot call iter() on the unconfirmed list; the
+ * owning cpu can reallocate ct->ext at any time.
+ */
+ set_bit(IPS_DYING_BIT, &ct->status);
}
spin_unlock_bh(&pcpu->lock);
cond_resched();
}
- return NULL;
-found:
- atomic_inc(&ct->ct_general.use);
- spin_unlock(lockp);
- local_bh_enable();
- return ct;
}
void nf_ct_iterate_cleanup(struct net *net,
@@ -1596,6 +1611,10 @@ void nf_ct_iterate_cleanup(struct net *net,
if (atomic_read(&net->ct.count) == 0)
return;
+ __nf_ct_unconfirmed_destroy(net);
+
+ synchronize_net();
+
while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
/* Time to push up daises... */
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 6dc44d9b4190..92e69af41656 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -379,17 +379,33 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
unsigned int h = helper_hash(&me->tuple);
struct nf_conntrack_helper *cur;
- int ret = 0;
+ int ret = 0, i;
BUG_ON(me->expect_policy == NULL);
BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
mutex_lock(&nf_ct_helper_mutex);
- hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
- if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
- ret = -EEXIST;
- goto out;
+ for (i = 0; i < nf_ct_helper_hsize; i++) {
+ hlist_for_each_entry(cur, &nf_ct_helper_hash[i], hnode) {
+ if (!strcmp(cur->name, me->name) &&
+ (cur->tuple.src.l3num == NFPROTO_UNSPEC ||
+ cur->tuple.src.l3num == me->tuple.src.l3num) &&
+ cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ }
+
+ /* avoid unpredictable behaviour for auto_assign_helper */
+ if (!(me->flags & NF_CT_HELPER_F_USERSPACE)) {
+ hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
+ if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple,
+ &mask)) {
+ ret = -EEXIST;
+ goto out;
+ }
}
}
hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d5caed5bcfb1..9e30fd0ab227 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -890,8 +890,13 @@ restart:
}
out:
local_bh_enable();
- if (last)
+ if (last) {
+ /* nf ct hash resize happened, now clear the leftover. */
+ if ((struct nf_conn *)cb->args[1] == last)
+ cb->args[1] = 0;
+
nf_ct_put(last);
+ }
while (i) {
i--;
@@ -1008,9 +1013,8 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
- struct nf_conntrack_tuple *tuple,
- enum ctattr_type type, u_int8_t l3num,
- struct nf_conntrack_zone *zone)
+ struct nf_conntrack_tuple *tuple, u32 type,
+ u_int8_t l3num, struct nf_conntrack_zone *zone)
{
struct nlattr *tb[CTA_TUPLE_MAX+1];
int err;
@@ -2409,7 +2413,7 @@ static struct nfnl_ct_hook ctnetlink_glue_hook = {
static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple,
- enum ctattr_expect type)
+ u32 type)
{
struct nlattr *nest_parms;
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
index fbce552a796e..7d7466dbf663 100644
--- a/net/netfilter/nf_nat_proto_common.c
+++ b/net/netfilter/nf_nat_proto_common.c
@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
const struct nf_conn *ct,
u16 *rover)
{
- unsigned int range_size, min, i;
+ unsigned int range_size, min, max, i;
__be16 *portptr;
u_int16_t off;
@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
}
} else {
min = ntohs(range->min_proto.all);
- range_size = ntohs(range->max_proto.all) - min + 1;
+ max = ntohs(range->max_proto.all);
+ if (unlikely(max < min))
+ swap(max, min);
+ range_size = max - min + 1;
}
if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 31ca94793aa9..b9dd4e960426 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -82,8 +82,7 @@ static void nft_dynset_eval(const struct nft_expr *expr,
nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
timeout = priv->timeout ? : set->timeout;
*nft_set_ext_expiration(ext) = jiffies + timeout;
- } else if (sexpr == NULL)
- goto out;
+ }
if (sexpr != NULL)
sexpr->ops->eval(sexpr, regs, pkt);
@@ -92,7 +91,7 @@ static void nft_dynset_eval(const struct nft_expr *expr,
regs->verdict.code = NFT_BREAK;
return;
}
-out:
+
if (!priv->invert)
regs->verdict.code = NFT_BREAK;
}
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index e47ade305a46..59be89813a29 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -39,6 +39,8 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
+#define XT_PCPU_BLOCK_SIZE 4096
+
struct compat_delta {
unsigned int offset; /* offset in kernel */
int delta; /* delta in 32bit user land */
@@ -365,6 +367,36 @@ textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
return buf;
}
+/**
+ * xt_check_proc_name - check that name is suitable for /proc file creation
+ *
+ * @name: file name candidate
+ * @size: length of buffer
+ *
+ * Some x_tables modules wish to create a file in /proc.
+ * This function makes sure that the name is suitable for this
+ * purpose: it checks that the name is NUL terminated and isn't a
+ * 'special' name, like "..".
+ *
+ * Returns a negative number on error or 0 if the name is usable.
+ */
+int xt_check_proc_name(const char *name, unsigned int size)
+{
+ if (name[0] == '\0')
+ return -EINVAL;
+
+ if (strnlen(name, size) == size)
+ return -ENAMETOOLONG;
+
+ if (strcmp(name, ".") == 0 ||
+ strcmp(name, "..") == 0 ||
+ strchr(name, '/'))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(xt_check_proc_name);
+
int xt_check_match(struct xt_mtchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
@@ -1004,8 +1036,10 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
list_for_each_entry(t, &init_net.xt.tables[af], list) {
if (strcmp(t->name, name))
continue;
- if (!try_module_get(t->me))
+ if (!try_module_get(t->me)) {
+ mutex_unlock(&xt[af].mutex);
return NULL;
+ }
mutex_unlock(&xt[af].mutex);
if (t->table_init(net) != 0) {
@@ -1619,6 +1653,59 @@ void xt_proto_fini(struct net *net, u_int8_t af)
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
+/**
+ * xt_percpu_counter_alloc - allocate x_tables rule counter
+ *
+ * @state: pointer to xt_percpu allocation state
+ * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
+ *
+ * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
+ * contain the address of the real (percpu) counter.
+ *
+ * Rule evaluation needs to use xt_get_this_cpu_counter() helper
+ * to fetch the real percpu counter.
+ *
+ * To speed up allocation and improve data locality, a 4kb block is
+ * allocated.
+ *
+ * xt_percpu_counter_alloc_state contains the base address of the
+ * allocated page and the current sub-offset.
+ *
+ * returns false on error.
+ */
+bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
+ struct xt_counters *counter)
+{
+ BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
+
+ if (nr_cpu_ids <= 1)
+ return true;
+
+ if (!state->mem) {
+ state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
+ XT_PCPU_BLOCK_SIZE);
+ if (!state->mem)
+ return false;
+ }
+ counter->pcnt = (__force unsigned long)(state->mem + state->off);
+ state->off += sizeof(*counter);
+ if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
+ state->mem = NULL;
+ state->off = 0;
+ }
+ return true;
+}
+EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
+
+void xt_percpu_counter_free(struct xt_counters *counters)
+{
+ unsigned long pcnt = counters->pcnt;
+
+ if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
+ free_percpu((void __percpu *)pcnt);
+}
+EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
+
static int __net_init xt_net_init(struct net *net)
{
int i;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 6669e68d589e..00ef8243d48d 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -168,8 +168,10 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
goto err_put_timeout;
}
timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
- if (timeout_ext == NULL)
+ if (!timeout_ext) {
ret = -ENOMEM;
+ goto err_put_timeout;
+ }
rcu_read_unlock();
return ret;
@@ -201,6 +203,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
struct xt_ct_target_info_v1 *info)
{
struct nf_conntrack_zone zone;
+ struct nf_conn_help *help;
struct nf_conn *ct;
int ret = -EOPNOTSUPP;
@@ -249,7 +252,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
if (info->timeout[0]) {
ret = xt_ct_set_timeout(ct, par, info->timeout);
if (ret < 0)
- goto err3;
+ goto err4;
}
__set_bit(IPS_CONFIRMED_BIT, &ct->status);
nf_conntrack_get(&ct->ct_general);
@@ -257,6 +260,10 @@ out:
info->ct = ct;
return 0;
+err4:
+ help = nfct_help(ct);
+ if (help)
+ module_put(help->helper->me);
err3:
nf_ct_tmpl_free(ct);
err2:
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index daf45da448fa..bb5d6a058fb7 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -147,11 +147,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
(unsigned long) info->timer);
info->timer->refcnt = 1;
+ INIT_WORK(&info->timer->work, idletimer_tg_work);
+
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
- INIT_WORK(&info->timer->work, idletimer_tg_work);
-
return 0;
out_free_attr:
@@ -192,7 +192,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
pr_debug("timeout value is zero\n");
return -EINVAL;
}
-
+ if (info->timeout >= INT_MAX / 1000) {
+ pr_debug("timeout value is too big\n");
+ return -EINVAL;
+ }
if (info->label[0] == '\0' ||
strnlen(info->label,
MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 3ba31c194cce..0858fe17e14a 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -141,10 +141,11 @@ static int led_tg_check(const struct xt_tgchk_param *par)
goto exit_alloc;
}
- /* See if we need to set up a timer */
- if (ledinfo->delay > 0)
- setup_timer(&ledinternal->timer, led_timeout_callback,
- (unsigned long)ledinternal);
+ /* Since the ledinternal timer can be shared between multiple targets,
+ * always set it up, even if the current target does not need it.
+ */
+ setup_timer(&ledinternal->timer, led_timeout_callback,
+ (unsigned long)ledinternal);
list_add_tail(&ledinternal->list, &xt_led_triggers);
@@ -181,8 +182,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
list_del(&ledinternal->list);
- if (ledinfo->delay > 0)
- del_timer_sync(&ledinternal->timer);
+ del_timer_sync(&ledinternal->timer);
led_trigger_unregister(&ledinternal->netfilter_led_trigger);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index b89b688e9d01..a1a29cdc58fc 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -794,8 +794,9 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
struct hashlimit_cfg2 cfg = {};
int ret;
- if (info->name[sizeof(info->name) - 1] != '\0')
- return -EINVAL;
+ ret = xt_check_proc_name(info->name, sizeof(info->name));
+ if (ret)
+ return ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
@@ -809,9 +810,11 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
static int hashlimit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
+ int ret;
- if (info->name[sizeof(info->name) - 1] != '\0')
- return -EINVAL;
+ ret = xt_check_proc_name(info->name, sizeof(info->name));
+ if (ret)
+ return ret;
return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
info->name, 2);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index e3b7a09b103e..79d7ad621a80 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -361,9 +361,9 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
return -EINVAL;
}
- if (info->name[0] == '\0' ||
- strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
- return -EINVAL;
+ ret = xt_check_proc_name(info->name, sizeof(info->name));
+ if (ret)
+ return ret;
if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index e1c123d4cdda..15e6e7b9fd2b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1054,6 +1054,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
if (addr->sa_family != AF_NETLINK)
return -EINVAL;
+ if (alen < sizeof(struct sockaddr_nl))
+ return -EINVAL;
+
if ((nladdr->nl_groups || nladdr->nl_pid) &&
!netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
return -EPERM;
@@ -1792,6 +1795,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_namelen) {
err = -EINVAL;
+ if (msg->msg_namelen < sizeof(struct sockaddr_nl))
+ goto out;
if (addr->nl_family != AF_NETLINK)
goto out;
dst_portid = addr->nl_pid;
@@ -2258,7 +2263,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
if (cb->start) {
ret = cb->start(cb);
if (ret)
- goto error_unlock;
+ goto error_put;
}
nlk->cb_running = true;
@@ -2278,6 +2283,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
*/
return -EINTR;
+error_put:
+ module_put(control->module);
error_unlock:
sock_put(sk);
mutex_unlock(nlk->cb_mutex);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 49c28e8ef01b..9192a6143523 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1103,6 +1103,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
{
struct sk_buff *tmp;
struct net *net, *prev = NULL;
+ bool delivered = false;
int err;
for_each_net_rcu(net) {
@@ -1114,14 +1115,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
}
err = nlmsg_multicast(prev->genl_sock, tmp,
portid, group, flags);
- if (err)
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
goto error;
}
prev = net;
}
- return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+ err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
+ return err;
+ return delivered ? 0 : -ESRCH;
error:
kfree_skb(skb);
return err;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index b28e45b691de..466393936db9 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -396,10 +396,38 @@ ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
u16 proto, const struct sk_buff *skb)
{
struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_expect *exp;
if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
return NULL;
- return __nf_ct_expect_find(net, zone, &tuple);
+
+ exp = __nf_ct_expect_find(net, zone, &tuple);
+ if (exp) {
+ struct nf_conntrack_tuple_hash *h;
+
+ /* Delete existing conntrack entry, if it clashes with the
+ * expectation. This can happen since conntrack ALGs do not
+ * check for clashes between (new) expectations and existing
+ * conntrack entries. nf_conntrack_in() will check the
+ * expectations only if a conntrack entry cannot be found,
+ * which can lead to OVS finding the expectation (here) in the
+ * init direction, but which will not be removed by the
+ * nf_conntrack_in() call, if a matching conntrack entry is
+ * found instead. In this case all init direction packets
+ * would be reported as new related packets, while reply
+ * direction packets would be reported as un-related
+ * established packets.
+ */
+ h = nf_conntrack_find_get(net, zone, &tuple);
+ if (h) {
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+ nf_ct_delete(ct, 0, 0);
+ nf_conntrack_put(&ct->ct_general);
+ }
+ }
+
+ return exp;
}
/* This replicates logic from nf_conntrack_core.c that is not exported. */
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 1668916bdbde..326945d9be5f 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1296,13 +1296,10 @@ static void nlattr_set(struct nlattr *attr, u8 val,
/* The nlattr stream should already have been validated */
nla_for_each_nested(nla, attr, rem) {
- if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
- if (tbl[nla_type(nla)].next)
- tbl = tbl[nla_type(nla)].next;
- nlattr_set(nla, val, tbl);
- } else {
+ if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
+ nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
+ else
memset(nla_data(nla), val, nla_len(nla));
- }
if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 00994de54d57..c0ba00a4d292 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -334,11 +334,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
skb_set_queue_mapping(skb, queue_index);
}
-/* register_prot_hook must be invoked with the po->bind_lock held,
+/* __register_prot_hook must be invoked through register_prot_hook
* or from a context in which asynchronous accesses to the packet
* socket is not possible (packet_create()).
*/
-static void register_prot_hook(struct sock *sk)
+static void __register_prot_hook(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
@@ -353,8 +353,13 @@ static void register_prot_hook(struct sock *sk)
}
}
-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
- * held. If the sync parameter is true, we will temporarily drop
+static void register_prot_hook(struct sock *sk)
+{
+ lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
+ __register_prot_hook(sk);
+}
+
+/* If the sync parameter is true, we will temporarily drop
* the po->bind_lock and do a synchronize_net to make sure no
* asynchronous packet processing paths still refer to the elements
* of po->prot_hook. If the sync parameter is false, it is the
@@ -364,6 +369,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
{
struct packet_sock *po = pkt_sk(sk);
+ lockdep_assert_held_once(&po->bind_lock);
+
po->running = 0;
if (po->fanout)
@@ -3018,6 +3025,7 @@ static int packet_release(struct socket *sock)
packet_flush_mclist(sk);
+ lock_sock(sk);
if (po->rx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 0);
@@ -3027,6 +3035,7 @@ static int packet_release(struct socket *sock)
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 1);
}
+ release_sock(sk);
f = fanout_release(sk);
@@ -3260,7 +3269,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
if (proto) {
po->prot_hook.type = proto;
- register_prot_hook(sk);
+ __register_prot_hook(sk);
}
mutex_lock(&net->packet.sklist_lock);
@@ -3655,6 +3664,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
union tpacket_req_u req_u;
int len;
+ lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
@@ -3665,12 +3675,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
len = sizeof(req_u.req3);
break;
}
- if (optlen < len)
- return -EINVAL;
- if (copy_from_user(&req_u.req, optval, len))
- return -EFAULT;
- return packet_set_ring(sk, &req_u, 0,
- optname == PACKET_TX_RING);
+ if (optlen < len) {
+ ret = -EINVAL;
+ } else {
+ if (copy_from_user(&req_u.req, optval, len))
+ ret = -EFAULT;
+ else
+ ret = packet_set_ring(sk, &req_u, 0,
+ optname == PACKET_TX_RING);
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_COPY_THRESH:
{
@@ -3736,12 +3751,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->tp_loss = !!val;
- return 0;
+
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_loss = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_AUXDATA:
{
@@ -3752,7 +3773,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ lock_sock(sk);
po->auxdata = !!val;
+ release_sock(sk);
return 0;
}
case PACKET_ORIGDEV:
@@ -3764,7 +3787,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ lock_sock(sk);
po->origdev = !!val;
+ release_sock(sk);
return 0;
}
case PACKET_VNET_HDR:
@@ -3773,15 +3798,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (sock->type != SOCK_RAW)
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->has_vnet_hdr = !!val;
- return 0;
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->has_vnet_hdr = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_TIMESTAMP:
{
@@ -3819,11 +3849,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->tp_tx_has_off = !!val;
+
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_tx_has_off = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
return 0;
}
case PACKET_QDISC_BYPASS:
@@ -4220,7 +4256,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;
- lock_sock(sk);
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
net_warn_ratelimited("Tx-ring is not supported.\n");
@@ -4356,7 +4391,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
- release_sock(sk);
return err;
}
diff --git a/net/packet/internal.h b/net/packet/internal.h
index d55bfc34d6b3..1309e2a7baad 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -109,10 +109,12 @@ struct packet_sock {
int copy_thresh;
spinlock_t bind_lock;
struct mutex pg_vec_lock;
- unsigned int running:1, /* prot_hook is attached*/
- auxdata:1,
+ unsigned int running; /* bind_lock must be held */
+ unsigned int auxdata:1, /* writer must hold sock lock */
origdev:1,
- has_vnet_hdr:1;
+ has_vnet_hdr:1,
+ tp_loss:1,
+ tp_tx_has_off:1;
int pressure;
int ifindex; /* bound device */
__be16 num;
@@ -122,8 +124,6 @@ struct packet_sock {
enum tpacket_versions tp_version;
unsigned int tp_hdrlen;
unsigned int tp_reserve;
- unsigned int tp_loss:1;
- unsigned int tp_tx_has_off:1;
unsigned int tp_tstamp;
struct net_device __rcu *cached_dev;
int (*xmit)(struct sk_buff *skb);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 095f6ce583fe..adb53ae97a02 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -114,6 +114,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
rs, &addr, (int)ntohs(*port));
break;
} else {
+ rs->rs_bound_addr = 0;
rds_sock_put(rs);
ret = -ENOMEM;
break;
diff --git a/net/rds/send.c b/net/rds/send.c
index ef53d164e146..50241d30e16d 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -983,10 +983,15 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
if (conn->c_npaths == 0 && hash != 0) {
rds_send_ping(conn);
- if (conn->c_npaths == 0) {
- wait_event_interruptible(conn->c_hs_waitq,
- (conn->c_npaths != 0));
- }
+ /* The underlying connection is not up yet. Need to wait
+ * until it is up to be sure that the non-zero c_path can be
+ * used. But if we are interrupted, we have to use the zero
+ * c_path in case the connection ends up being non-MP capable.
+ */
+ if (conn->c_npaths == 0)
+ if (wait_event_interruptible(conn->c_hs_waitq,
+ conn->c_npaths != 0))
+ hash = 0;
if (conn->c_npaths == 1)
hash = 0;
}
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 76c01cbd56e3..d6d8b34c5f22 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -138,13 +138,18 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
ret = rfkill_register(rfkill->rfkill_dev);
if (ret < 0)
- return ret;
+ goto err_destroy;
platform_set_drvdata(pdev, rfkill);
dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
return 0;
+
+err_destroy:
+ rfkill_destroy(rfkill->rfkill_dev);
+
+ return ret;
}
static int rfkill_gpio_remove(struct platform_device *pdev)
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 5dab1ff3a6c2..59d328603312 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -391,7 +391,7 @@ send_fragmentable:
(char *)&opt, sizeof(opt));
if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg,
- iov, 1, iov[0].iov_len);
+ iov, 2, len);
opt = IPV6_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket,
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 4374e7b9c7bf..90df95e18ea4 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -229,7 +229,9 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
len &= ~(call->conn->size_align - 1);
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, len);
+ err = skb_to_sgvec(skb, sg, 0, len);
+ if (unlikely(err < 0))
+ goto out;
skcipher_request_set_crypt(req, sg, sg, len, iv.x);
crypto_skcipher_encrypt(req);
@@ -325,7 +327,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
struct sk_buff *trailer;
u32 data_size, buf;
u16 check;
- int nsg;
+ int nsg, ret;
_enter("");
@@ -342,7 +344,9 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
goto nomem;
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, offset, 8);
+ ret = skb_to_sgvec(skb, sg, offset, 8);
+ if (unlikely(ret < 0))
+ return ret;
/* start the decryption afresh */
memset(&iv, 0, sizeof(iv));
@@ -405,7 +409,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
struct sk_buff *trailer;
u32 data_size, buf;
u16 check;
- int nsg;
+ int nsg, ret;
_enter(",{%d}", skb->len);
@@ -429,7 +433,12 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
}
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, offset, len);
+ ret = skb_to_sgvec(skb, sg, offset, len);
+ if (unlikely(ret < 0)) {
+ if (sg != _sg)
+ kfree(sg);
+ return ret;
+ }
/* decrypt from the session key */
token = call->conn->params.key->payload.data[0];
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f3117324146a..67adb4ecded2 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -95,8 +95,10 @@ static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
continue;
nest = nla_nest_start(skb, n_i);
- if (nest == NULL)
+ if (nest == NULL) {
+ index--;
goto nla_put_failure;
+ }
err = tcf_action_dump_1(skb, p, 0, 0);
if (err < 0) {
index--;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1d3960033f61..40496f34bdb3 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -245,10 +245,14 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
- if (cfg->is_ebpf)
- bpf_prog_put(cfg->filter);
- else
- bpf_prog_destroy(cfg->filter);
+ struct bpf_prog *filter = cfg->filter;
+
+ if (filter) {
+ if (cfg->is_ebpf)
+ bpf_prog_put(filter);
+ else
+ bpf_prog_destroy(filter);
+ }
kfree(cfg->bpf_ops);
kfree(cfg->bpf_name);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index e0defcef376d..24a0c66394a0 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -180,6 +180,9 @@ static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
struct tcphdr *tcph;
const struct iphdr *iph;
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ return 1;
+
tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
if (tcph == NULL)
return 0;
@@ -201,6 +204,9 @@ static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
struct tcphdr *tcph;
const struct ipv6hdr *ip6h;
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ return 1;
+
tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
if (tcph == NULL)
return 0;
@@ -224,6 +230,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
const struct iphdr *iph;
u16 ul;
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ return 1;
+
/*
* Support both UDP and UDPLITE checksum algorithms, Don't use
* udph->len to get the real length without any protocol check,
@@ -277,6 +286,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
const struct ipv6hdr *ip6h;
u16 ul;
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ return 1;
+
/*
* Support both UDP and UDPLITE checksum algorithms, Don't use
* udph->len to get the real length without any protocol check,
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 95c463cbb9a6..235db2c9bbbb 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -634,7 +634,7 @@ int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
}
}
- return 0;
+ return -ENOENT;
}
struct ifeheadr {
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index f85313d60a4d..bd8e86cb8743 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -192,7 +192,8 @@ static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
struct tcf_skbmod_params *p;
p = rcu_dereference_protected(d->skbmod_p, 1);
- kfree_rcu(p, rcu);
+ if (p)
+ kfree_rcu(p, rcu);
}
static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index af47bdf2f483..901fb8bb9dce 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -141,6 +141,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
break;
default:
+ ret = -EINVAL;
goto err_out;
}
@@ -195,11 +196,12 @@ static void tunnel_key_release(struct tc_action *a, int bind)
struct tcf_tunnel_key_params *params;
params = rcu_dereference_protected(t->params, 1);
+ if (params) {
+ if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+ dst_release(&params->tcft_enc_metadata->dst);
- if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
- dst_release(&params->tcft_enc_metadata->dst);
-
- kfree_rcu(params, rcu);
+ kfree_rcu(params, rcu);
+ }
}
static int tunnel_key_dump_addresses(struct sk_buff *skb,
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 18e752439f6f..b57b4de73038 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
return f->next == &detached;
}
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+ return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+ if (head->first)
+ head->last->next = flow;
+ else
+ head->first = flow;
+ head->last = flow;
+ flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+ rb_erase(&f->rate_node, &q->delayed);
+ q->throttled_flows--;
+ fq_flow_add_tail(&q->old_flows, f);
+}
+
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
static struct kmem_cache *fq_flow_cachep __read_mostly;
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
- if (head->first)
- head->last->next = flow;
- else
- head->first = flow;
- head->last = flow;
- flow->next = NULL;
-}
/* limit number of collected flows per round */
#define FQ_GC_MAX 8
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
f->socket_hash != sk->sk_hash)) {
f->credit = q->initial_quantum;
f->socket_hash = sk->sk_hash;
+ if (fq_flow_is_throttled(f))
+ fq_flow_unset_throttled(q, f);
f->time_next_packet = 0ULL;
}
return f;
@@ -430,9 +445,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
q->time_next_delayed_flow = f->time_next_packet;
break;
}
- rb_erase(p, &q->delayed);
- q->throttled_flows--;
- fq_flow_add_tail(&q->old_flows, f);
+ fq_flow_unset_throttled(q, f);
}
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 9f7b380cf0a3..e899d9eb76cb 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -462,7 +462,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* If a delay is expected, orphan the skb. (orphaning usually takes
* place at TX completion time, so _before_ the link transit delay)
*/
- if (q->latency || q->jitter)
+ if (q->latency || q->jitter || q->rate)
skb_orphan_partial(skb);
/*
@@ -513,7 +513,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
if (unlikely(sch->q.qlen >= sch->limit))
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_all(skb, sch, to_free);
qdisc_qstats_backlog_inc(sch, skb);
@@ -530,21 +530,31 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
now = psched_get_time();
if (q->rate) {
- struct sk_buff *last;
+ struct netem_skb_cb *last = NULL;
+
+ if (sch->q.tail)
+ last = netem_skb_cb(sch->q.tail);
+ if (q->t_root.rb_node) {
+ struct sk_buff *t_skb;
+ struct netem_skb_cb *t_last;
+
+ t_skb = netem_rb_to_skb(rb_last(&q->t_root));
+ t_last = netem_skb_cb(t_skb);
+ if (!last ||
+ t_last->time_to_send > last->time_to_send) {
+ last = t_last;
+ }
+ }
- if (sch->q.qlen)
- last = sch->q.tail;
- else
- last = netem_rb_to_skb(rb_last(&q->t_root));
if (last) {
/*
* Last packet in queue is reference point (now),
* calculate this time bonus and subtract
* from delay.
*/
- delay -= netem_skb_cb(last)->time_to_send - now;
+ delay -= last->time_to_send - now;
delay = max_t(psched_tdiff_t, 0, delay);
- now = netem_skb_cb(last)->time_to_send;
+ now = last->time_to_send;
}
delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f10d3397f917..738c55e994c4 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1006,9 +1006,10 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
struct sctp_endpoint *ep;
struct sctp_chunk *chunk;
struct sctp_inq *inqueue;
- int state;
sctp_subtype_t subtype;
+ int first_time = 1; /* is this the first time through the loop */
int error = 0;
+ int state;
/* The association should be held so we should be safe. */
ep = asoc->ep;
@@ -1019,6 +1020,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
state = asoc->state;
subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+ /* If the first chunk in the packet is AUTH, do special
+ * processing specified in Section 6.3 of SCTP-AUTH spec
+ */
+ if (first_time && subtype.chunk == SCTP_CID_AUTH) {
+ struct sctp_chunkhdr *next_hdr;
+
+ next_hdr = sctp_inq_peek(inqueue);
+ if (!next_hdr)
+ goto normal;
+
+ /* If the next chunk is COOKIE-ECHO, skip the AUTH
+ * chunk while saving a pointer to it so we can do
+ * Authentication later (during cookie-echo
+ * processing).
+ */
+ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
+ chunk->auth_chunk = skb_clone(chunk->skb,
+ GFP_ATOMIC);
+ chunk->auth = 1;
+ continue;
+ }
+ }
+
+normal:
/* SCTP-AUTH, Section 6.3:
* The receiver has a list of chunk types which it expects
* to be received only after an AUTH-chunk. This list has
@@ -1057,6 +1082,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
/* If there is an error on chunk, discard this packet. */
if (error && chunk)
chunk->pdiscard = 1;
+
+ if (first_time)
+ first_time = 0;
}
sctp_association_put(asoc);
}
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index f731de3e8428..e06083c53f57 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -217,7 +217,7 @@ new_skb:
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
- if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <=
skb_tail_pointer(chunk->skb)) {
/* This is not a singleton */
chunk->singleton = 0;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 5d015270e454..f4d5efb1d231 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -324,8 +324,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
- if (!IS_ERR(bdst) &&
- ipv6_chk_addr(dev_net(bdst->dev),
+ if (IS_ERR(bdst))
+ continue;
+
+ if (ipv6_chk_addr(dev_net(bdst->dev),
&laddr->a.v6.sin6_addr, bdst->dev, 1)) {
if (!IS_ERR_OR_NULL(dst))
dst_release(dst);
@@ -334,8 +336,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
}
bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
- if (matchlen > bmatchlen)
+ if (matchlen > bmatchlen) {
+ dst_release(bdst);
continue;
+ }
if (!IS_ERR_OR_NULL(dst))
dst_release(dst);
@@ -517,46 +521,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
addr->v6.sin6_scope_id = 0;
}
-/* Compare addresses exactly.
- * v4-mapped-v6 is also in consideration.
- */
-static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
- const union sctp_addr *addr2)
+static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2)
{
if (addr1->sa.sa_family != addr2->sa.sa_family) {
if (addr1->sa.sa_family == AF_INET &&
addr2->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
- if (addr2->v6.sin6_port == addr1->v4.sin_port &&
- addr2->v6.sin6_addr.s6_addr32[3] ==
- addr1->v4.sin_addr.s_addr)
- return 1;
- }
+ ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
+ addr2->v6.sin6_addr.s6_addr32[3] ==
+ addr1->v4.sin_addr.s_addr)
+ return 1;
+
if (addr2->sa.sa_family == AF_INET &&
addr1->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
- if (addr1->v6.sin6_port == addr2->v4.sin_port &&
- addr1->v6.sin6_addr.s6_addr32[3] ==
- addr2->v4.sin_addr.s_addr)
- return 1;
- }
+ ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
+ addr1->v6.sin6_addr.s6_addr32[3] ==
+ addr2->v4.sin_addr.s_addr)
+ return 1;
+
return 0;
}
- if (addr1->v6.sin6_port != addr2->v6.sin6_port)
- return 0;
+
if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
return 0;
+
/* If this is a linklocal address, compare the scope_id. */
- if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
- if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
- (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
- return 0;
- }
- }
+ if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+ addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+ addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
+ return 0;
return 1;
}
+/* Compare addresses exactly.
+ * v4-mapped-v6 is also in consideration.
+ */
+static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2)
+{
+ return __sctp_v6_cmp_addr(addr1, addr2) &&
+ addr1->v6.sin6_port == addr2->v6.sin6_port;
+}
+
/* Initialize addr struct to INADDR_ANY. */
static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
{
@@ -723,8 +730,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
sctp_v6_map_v4(addr);
}
- if (addr->sa.sa_family == AF_INET)
+ if (addr->sa.sa_family == AF_INET) {
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
return sizeof(struct sockaddr_in);
+ }
return sizeof(struct sockaddr_in6);
}
@@ -838,8 +847,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
const union sctp_addr *addr2,
struct sctp_sock *opt)
{
- struct sctp_af *af1, *af2;
struct sock *sk = sctp_opt2sk(opt);
+ struct sctp_af *af1, *af2;
af1 = sctp_get_af_specific(addr1->sa.sa_family);
af2 = sctp_get_af_specific(addr2->sa.sa_family);
@@ -855,10 +864,10 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
return 1;
- if (addr1->sa.sa_family != addr2->sa.sa_family)
- return 0;
+ if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
+ return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
- return af1->cmp_addr(addr1, addr2);
+ return __sctp_v6_cmp_addr(addr1, addr2);
}
/* Verify that the provided sockaddr looks bindable. Common verification,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 7b523e3f551f..fb7b7632316a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -510,22 +510,20 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (IS_ERR(rt))
continue;
- if (!dst)
- dst = &rt->dst;
-
/* Ensure the src address belongs to the output
* interface.
*/
odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
false);
if (!odev || odev->ifindex != fl4->flowi4_oif) {
- if (&rt->dst != dst)
+ if (!dst)
+ dst = &rt->dst;
+ else
dst_release(&rt->dst);
continue;
}
- if (dst != &rt->dst)
- dst_release(dst);
+ dst_release(dst);
dst = &rt->dst;
break;
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9e9690b7afe1..fc67d356b5fa 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1373,9 +1373,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
sctp_chunkhdr_t *chunk_hdr;
struct sk_buff *skb;
struct sock *sk;
+ int chunklen;
+
+ chunklen = SCTP_PAD4(sizeof(*chunk_hdr) + paylen);
+ if (chunklen > SCTP_MAX_CHUNK_LEN)
+ goto nodata;
/* No need to allocate LL here, as this is only a chunk. */
- skb = alloc_skb(SCTP_PAD4(sizeof(sctp_chunkhdr_t) + paylen), gfp);
+ skb = alloc_skb(chunklen, gfp);
if (!skb)
goto nodata;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8ec20a64a3f8..bfd068679710 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -144,10 +144,8 @@ static sctp_disposition_t sctp_sf_violation_chunk(
void *arg,
sctp_cmd_seq_t *commands);
-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
- const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(
const struct sctp_association *asoc,
- const sctp_subtype_t type,
struct sctp_chunk *chunk);
static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
@@ -615,6 +613,38 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
return SCTP_DISPOSITION_CONSUME;
}
+static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ struct sctp_chunk auth;
+
+ if (!chunk->auth_chunk)
+ return true;
+
+ /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
+ * is supposed to be authenticated and we have to do delayed
+ * authentication. We've just recreated the association using
+ * the information in the cookie and now it's much easier to
+ * do the authentication.
+ */
+
+ /* Make sure that we and the peer are AUTH capable */
+ if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
+ return false;
+
+ /* set-up our fake chunk so that we can process it */
+ auth.skb = chunk->auth_chunk;
+ auth.asoc = chunk->asoc;
+ auth.sctp_hdr = chunk->sctp_hdr;
+ auth.chunk_hdr = (struct sctp_chunkhdr *)
+ skb_push(chunk->auth_chunk,
+ sizeof(struct sctp_chunkhdr));
+ skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
+ auth.transport = chunk->transport;
+
+ return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
+}
+
/*
* Respond to a normal COOKIE ECHO chunk.
* We are the side that is being asked for an association.
@@ -751,36 +781,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
if (error)
goto nomem_init;
- /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
- * is supposed to be authenticated and we have to do delayed
- * authentication. We've just recreated the association using
- * the information in the cookie and now it's much easier to
- * do the authentication.
- */
- if (chunk->auth_chunk) {
- struct sctp_chunk auth;
- sctp_ierror_t ret;
-
- /* Make sure that we and the peer are AUTH capable */
- if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
-
- /* set-up our fake chunk so that we can process it */
- auth.skb = chunk->auth_chunk;
- auth.asoc = chunk->asoc;
- auth.sctp_hdr = chunk->sctp_hdr;
- auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
- sizeof(sctp_chunkhdr_t));
- skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
- auth.transport = chunk->transport;
-
- ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
- if (ret != SCTP_IERROR_NO_ERROR) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1717,13 +1720,15 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
GFP_ATOMIC))
goto nomem;
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Make sure no new addresses are being added during the
* restart. Though this is a pretty complicated attack
* since you'd have to get inside the cookie.
*/
- if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
+ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
return SCTP_DISPOSITION_CONSUME;
- }
/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
* the peer has restarted (Action A), it MUST NOT setup a new
@@ -1828,6 +1833,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
GFP_ATOMIC))
goto nomem;
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -1920,6 +1928,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
* a COOKIE ACK.
*/
+ if (!sctp_auth_chunk_verify(net, chunk, asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Don't accidentally move back into established state. */
if (asoc->state < SCTP_STATE_ESTABLISHED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -1959,7 +1970,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
}
}
- repl = sctp_make_cookie_ack(new_asoc, chunk);
+ repl = sctp_make_cookie_ack(asoc, chunk);
if (!repl)
goto nomem;
@@ -3981,10 +3992,8 @@ gen_shutdown:
*
* The return value is the disposition of the chunk.
*/
-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
- const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(
const struct sctp_association *asoc,
- const sctp_subtype_t type,
struct sctp_chunk *chunk)
{
struct sctp_authhdr *auth_hdr;
@@ -4083,7 +4092,7 @@ sctp_disposition_t sctp_sf_eat_auth(struct net *net,
commands);
auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
- error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
+ error = sctp_sf_authenticate(asoc, chunk);
switch (error) {
case SCTP_IERROR_AUTH_BAD_HMAC:
/* Generate the ERROR chunk and discard the rest
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fd5b9d573b38..78f38056fca6 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -335,11 +335,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
if (!opt->pf->af_supported(addr->sa.sa_family, opt))
return NULL;
- /* V4 mapped address are really of AF_INET family */
- if (addr->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
- !opt->pf->af_supported(AF_INET, opt))
- return NULL;
+ if (addr->sa.sa_family == AF_INET6) {
+ if (len < SIN6_LEN_RFC2133)
+ return NULL;
+ /* V4 mapped address are really of AF_INET family */
+ if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+ !opt->pf->af_supported(AF_INET, opt))
+ return NULL;
+ }
/* If we get this far, af is valid. */
af = sctp_get_af_specific(addr->sa.sa_family);
@@ -1519,7 +1522,7 @@ static void sctp_close(struct sock *sk, long timeout)
pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_state = SCTP_SS_CLOSING;
@@ -1569,7 +1572,7 @@ static void sctp_close(struct sock *sk, long timeout)
* held and that should be grabbed before socket lock.
*/
spin_lock_bh(&net->sctp.addr_wq_lock);
- bh_lock_sock(sk);
+ bh_lock_sock_nested(sk);
/* Hold the sock, since sk_common_release() will put sock_put()
* and we have just a little more cleanup.
@@ -4765,7 +4768,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
len = sizeof(int);
if (put_user(len, optlen))
return -EFAULT;
- if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
+ if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
return -EFAULT;
return 0;
}
@@ -5342,6 +5345,9 @@ copy_getaddrs:
err = -EFAULT;
goto out;
}
+ /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
+ * but we can't change it anymore.
+ */
if (put_user(bytes_copied, optlen))
err = -EFAULT;
out:
@@ -5778,7 +5784,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
- if (copy_from_user(&params, optval, sizeof(params)))
+ if (copy_from_user(&params, optval, len))
return -EFAULT;
} else
return -EINVAL;
@@ -5947,7 +5953,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
if (len < sizeof(struct sctp_authkeyid))
return -EINVAL;
- if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
+
+ len = sizeof(struct sctp_authkeyid);
+ if (copy_from_user(&val, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
@@ -5959,7 +5967,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
else
val.scact_keynumber = ep->active_key_id;
- len = sizeof(struct sctp_authkeyid);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
@@ -5985,7 +5992,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
- if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+ if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
to = p->gauth_chunks;
@@ -6030,7 +6037,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
- if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+ if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
to = p->gauth_chunks;
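
Note: the socket-option hunks above consistently copy exactly the validated length (or sizeof(val)) rather than an unrelated sizeof(struct ...), so a short user buffer can never be over-read or over-written. A rough sketch of the clamp-then-copy shape, with a hypothetical option structure and plain memcpy standing in for the user-copy helpers:

#include <string.h>

struct opt_value {        /* hypothetical option payload */
	int id;
	int value;
};

/* Clamp the caller-supplied length to the structure size, then copy only that much. */
int get_option(void *dst, size_t len, const struct opt_value *src)
{
	if (len < sizeof(*src))
		return -1;          /* too small: reject, as the kernel returns -EINVAL */
	len = sizeof(*src);         /* never copy more than the structure itself */
	memcpy(dst, src, len);
	return 0;
}
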
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index bea00058ce35..6825e05a68b2 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -723,7 +723,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
return event;
fail_mark:
- sctp_chunk_put(chunk);
kfree_skb(skb);
fail:
return NULL;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index b5c279b22680..bbee334ab1b0 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -59,7 +59,7 @@ static void strp_abort_rx_strp(struct strparser *strp, int err)
strp->rx_stopped = 1;
/* Report an error on the lower socket */
- csk->sk_err = err;
+ csk->sk_err = -err;
csk->sk_error_report(csk);
}
@@ -285,9 +285,9 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
strp_start_rx_timer(strp);
}
+ rxm->accum_len += cand_len;
strp->rx_need_bytes = rxm->strp.full_len -
rxm->accum_len;
- rxm->accum_len += cand_len;
rxm->early_eaten = cand_len;
STRP_STATS_ADD(strp->stats.rx_bytes, cand_len);
desc->count = 0; /* Stop reading socket */
@@ -310,6 +310,7 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
/* Hurray, we have a new message! */
del_timer(&strp->rx_msg_timer);
strp->rx_skb_head = NULL;
+ strp->rx_need_bytes = 0;
STRP_STATS_INCR(strp->stats.rx_msgs);
/* Give skb to upper layer */
@@ -374,9 +375,7 @@ void strp_data_ready(struct strparser *strp)
return;
if (strp->rx_need_bytes) {
- if (strp_peek_len(strp) >= strp->rx_need_bytes)
- strp->rx_need_bytes = 0;
- else
+ if (strp_peek_len(strp) < strp->rx_need_bytes)
return;
}
@@ -422,7 +421,7 @@ static void strp_rx_msg_timeout(unsigned long arg)
/* Message assembly timed out */
STRP_STATS_INCR(strp->stats.rx_msg_timeouts);
lock_sock(strp->sk);
- strp->cb.abort_parser(strp, ETIMEDOUT);
+ strp->cb.abort_parser(strp, -ETIMEDOUT);
release_sock(strp->sk);
}
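
Note: two things change in the strparser hunks: rx_need_bytes is now computed after accum_len has been updated with the bytes just consumed, and abort_parser()/sk_err are consistently given a negative errno (sk_err stores a positive value, hence the csk->sk_err = -err flip). A tiny sketch of the accounting order, with made-up field names:

#include <stddef.h>

struct msg_state {                 /* illustrative stand-in for the rx message state */
	size_t full_len;           /* total length announced by the parser */
	size_t accum_len;          /* bytes gathered so far */
	size_t need_bytes;         /* bytes still missing */
};

void account(struct msg_state *m, size_t just_consumed)
{
	m->accum_len += just_consumed;               /* account first ... */
	m->need_bytes = m->full_len - m->accum_len;  /* ... then derive what is still needed */
}
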
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 79aec90259cd..4afd4149a632 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -237,9 +237,6 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
- err = crypto_ahash_init(req);
- if (err)
- goto out;
err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
if (err)
goto out;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 61a504fb1ae2..34f94052c519 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
struct dentry *clnt_dir = pipe_dentry->d_parent;
struct dentry *gssd_dir = clnt_dir->d_parent;
+ dget(pipe_dentry);
__rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
__rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
__rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 69502fa68a3c..1a2b1f61ed26 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1054,6 +1054,7 @@ void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
cancel_delayed_work_sync(&buf->rb_recovery_worker);
+ cancel_delayed_work_sync(&buf->rb_refresh_worker);
while (!list_empty(&buf->rb_recv_bufs)) {
struct rpcrdma_rep *rep;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index d24d14ea8ba4..1bf9153004cd 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2384,7 +2384,12 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case -EHOSTUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
- /* retry with existing socket, after a delay */
+ /*
+ * xs_tcp_force_close() wakes tasks with -EIO.
+ * We need to wake them first to ensure the
+ * correct error code.
+ */
+ xprt_wake_pending_tasks(xprt, status);
xs_tcp_force_close(xprt);
goto out;
}
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 52d74760fb68..ca68db207965 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -322,6 +322,7 @@ restart:
if (res) {
pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
name, -res);
+ kfree(b);
return -EINVAL;
}
@@ -345,8 +346,10 @@ restart:
if (skb)
tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
- if (tipc_mon_create(net, bearer_id))
+ if (tipc_mon_create(net, bearer_id)) {
+ bearer_disable(net, b);
return -ENOMEM;
+ }
pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
name,
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 9e109bb1a207..0fcfb3916dcf 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -633,9 +633,13 @@ void tipc_mon_delete(struct net *net, int bearer_id)
{
struct tipc_net *tn = tipc_net(net);
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
- struct tipc_peer *self = get_self(net, bearer_id);
+ struct tipc_peer *self;
struct tipc_peer *peer, *tmp;
+ if (!mon)
+ return;
+
+ self = get_self(net, bearer_id);
write_lock_bh(&mon->lock);
tn->monitors[bearer_id] = NULL;
list_for_each_entry_safe(peer, tmp, &self->list, list) {
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 3200059d14b2..9ba3c462f86e 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -79,7 +79,8 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
[TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
+ [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
+ [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
};
const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 5b3e1ea37b6d..db8fbc076e1a 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2094,6 +2094,8 @@ int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
int err;
msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg.skb)
+ return -ENOMEM;
msg.portid = info->snd_portid;
msg.seq = info->snd_seq;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 91722e97cdd5..36280e114959 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4081,7 +4081,7 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
struct nlattr *rate;
u32 bitrate;
u16 bitrate_compat;
- enum nl80211_attrs rate_flg;
+ enum nl80211_rate_info rate_flg;
rate = nla_nest_start(msg, attr);
if (!rate)
@@ -10777,7 +10777,8 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
break;
case NL80211_NAN_FUNC_FOLLOW_UP:
if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
- !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) {
+ !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
+ !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
err = -EINVAL;
goto out;
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index c921c2eed15d..bb54d9db82df 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -663,7 +663,7 @@ __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame,
int offset, int len)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- const skb_frag_t *frag = &sh->frags[-1];
+ const skb_frag_t *frag = &sh->frags[0];
struct page *frag_page;
void *frag_ptr;
int frag_len, frag_size;
@@ -676,10 +676,10 @@ __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame,
while (offset >= frag_size) {
offset -= frag_size;
- frag++;
frag_page = skb_frag_page(frag);
frag_ptr = skb_frag_address(frag);
frag_size = skb_frag_size(frag);
+ frag++;
}
frag_ptr += offset;
@@ -691,12 +691,12 @@ __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame,
len -= cur_len;
while (len > 0) {
- frag++;
frag_len = skb_frag_size(frag);
cur_len = min(len, frag_len);
__frame_add_frag(frame, skb_frag_page(frag),
skb_frag_address(frag), cur_len, frag_len);
len -= cur_len;
+ frag++;
}
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index f83b74d3e2ac..007721632b07 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1790,32 +1790,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
static int __init x25_init(void)
{
- int rc = proto_register(&x25_proto, 0);
+ int rc;
- if (rc != 0)
+ rc = proto_register(&x25_proto, 0);
+ if (rc)
goto out;
rc = sock_register(&x25_family_ops);
- if (rc != 0)
+ if (rc)
goto out_proto;
dev_add_pack(&x25_packet_type);
rc = register_netdevice_notifier(&x25_dev_notifier);
- if (rc != 0)
+ if (rc)
goto out_sock;
- pr_info("Linux Version 0.2\n");
+ rc = x25_register_sysctl();
+ if (rc)
+ goto out_dev;
- x25_register_sysctl();
rc = x25_proc_init();
- if (rc != 0)
- goto out_dev;
+ if (rc)
+ goto out_sysctl;
+
+ pr_info("Linux Version 0.2\n");
+
out:
return rc;
+out_sysctl:
+ x25_unregister_sysctl();
out_dev:
unregister_netdevice_notifier(&x25_dev_notifier);
out_sock:
+ dev_remove_pack(&x25_packet_type);
sock_unregister(AF_X25);
out_proto:
proto_unregister(&x25_proto);
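
Note: the x25_init() rework above registers each facility in order and unwinds in strict reverse order on failure, now also covering dev_remove_pack() and the new x25_unregister_sysctl() step, with the success message printed only once everything is up. A compact sketch of that goto-ladder convention (the register_*/unregister_* functions here are placeholders):

#include <stdio.h>

static int  register_a(void)   { return 0; }
static int  register_b(void)   { return 0; }
static int  register_c(void)   { return 0; }
static void unregister_b(void) { }
static void unregister_a(void) { }

int subsystem_init(void)
{
	int rc;

	rc = register_a();
	if (rc)
		goto out;
	rc = register_b();
	if (rc)
		goto out_a;
	rc = register_c();
	if (rc)
		goto out_b;
	printf("initialised\n");   /* announce success only after all registrations */
	return 0;

out_b:
	unregister_b();            /* undo in reverse order of registration */
out_a:
	unregister_a();
out:
	return rc;
}
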
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index 43239527a205..703d46aae7a2 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = {
{ 0, },
};
-void __init x25_register_sysctl(void)
+int __init x25_register_sysctl(void)
{
x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
+ if (!x25_table_header)
+ return -ENOMEM;
+ return 0;
}
void x25_unregister_sysctl(void)
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index ccfdc7115a83..a00ec715aa46 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
struct crypto_comp *tfm;
/* This can be any valid CPU ID so we don't need locking. */
- tfm = __this_cpu_read(*pos->tfms);
+ tfm = this_cpu_read(*pos->tfms);
if (!strcmp(crypto_comp_name(tfm), alg_name)) {
pos->users++;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5e89b7461f99..5b8fa6832687 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1346,7 +1346,7 @@ EXPORT_SYMBOL(xfrm_policy_delete);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
- struct net *net = xp_net(pol);
+ struct net *net = sock_net(sk);
struct xfrm_policy *old_pol;
#ifdef CONFIG_XFRM_SUB_POLICY
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 419bf5d463bd..71a94e549301 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1197,6 +1197,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
if (orig->aead) {
x->aead = xfrm_algo_aead_clone(orig->aead);
+ x->geniv = orig->geniv;
if (!x->aead)
goto error;
}
@@ -1246,6 +1247,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
x->curlft.add_time = orig->curlft.add_time;
x->km.state = orig->km.state;
x->km.seq = orig->km.seq;
+ x->replay = orig->replay;
+ x->preplay = orig->preplay;
return x;
@@ -1883,6 +1886,18 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
struct xfrm_mgr *km;
struct xfrm_policy *pol = NULL;
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+#endif
+
+ if (!optval && !optlen) {
+ xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
+ xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
+ __sk_dst_reset(sk);
+ return 0;
+ }
+
if (optlen <= 0 || optlen > PAGE_SIZE)
return -EMSGSIZE;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 5d33967d9aa1..6a029358bfd1 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -121,22 +121,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
struct xfrm_replay_state_esn *rs;
- if (p->flags & XFRM_STATE_ESN) {
- if (!rt)
- return -EINVAL;
+ if (!rt)
+ return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
- rs = nla_data(rt);
+ rs = nla_data(rt);
- if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
- return -EINVAL;
-
- if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
- nla_len(rt) != sizeof(*rs))
- return -EINVAL;
- }
+ if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+ return -EINVAL;
- if (!rt)
- return 0;
+ if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
+ nla_len(rt) != sizeof(*rs))
+ return -EINVAL;
/* As only ESP and AH support ESN feature. */
if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0a07f9014944..ae0f9ab1a70d 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -290,11 +290,11 @@ cmd_dt_S_dtb= \
echo '\#include <asm-generic/vmlinux.lds.h>'; \
echo '.section .dtb.init.rodata,"a"'; \
echo '.balign STRUCT_ALIGNMENT'; \
- echo '.global __dtb_$(*F)_begin'; \
- echo '__dtb_$(*F)_begin:'; \
+ echo '.global __dtb_$(subst -,_,$(*F))_begin'; \
+ echo '__dtb_$(subst -,_,$(*F))_begin:'; \
echo '.incbin "$<" '; \
- echo '__dtb_$(*F)_end:'; \
- echo '.global __dtb_$(*F)_end'; \
+ echo '__dtb_$(subst -,_,$(*F))_end:'; \
+ echo '.global __dtb_$(subst -,_,$(*F))_end'; \
echo '.balign STRUCT_ALIGNMENT'; \
) > $@
diff --git a/scripts/tags.sh b/scripts/tags.sh
index a2ff3388e5ea..2a61db329adf 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -106,6 +106,7 @@ all_compiled_sources()
case "$i" in
*.[cS])
j=${i/\.[cS]/\.o}
+ j="${j#$tree}"
if [ -e $j ]; then
echo $i
fi
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 7d3a98b2d55a..02cc952b86aa 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -707,7 +707,7 @@ module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR);
/* Maximum pathname length before accesses will start getting rejected */
unsigned int aa_g_path_max = 2 * PATH_MAX;
-module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR);
+module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
/* Determines how paranoid loading of policy is and how much verification
* on the loaded policy is done.
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 6830d2427e47..7bf8b005a178 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -207,7 +207,8 @@ int ima_appraise_measurement(enum ima_hooks func,
if (opened & FILE_CREATED)
iint->flags |= IMA_NEW_FILE;
if ((iint->flags & IMA_NEW_FILE) &&
- !(iint->flags & IMA_DIGSIG_REQUIRED))
+ (!(iint->flags & IMA_DIGSIG_REQUIRED) ||
+ (inode->i_size == 0)))
status = INTEGRITY_PASS;
goto out;
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c2da45ae5b2a..8ded80867b92 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -406,18 +406,6 @@ static void superblock_free_security(struct super_block *sb)
kfree(sbsec);
}
-/* The file system's label must be initialized prior to use. */
-
-static const char *labeling_behaviors[7] = {
- "uses xattr",
- "uses transition SIDs",
- "uses task SIDs",
- "uses genfs_contexts",
- "not configured for labeling",
- "uses mountpoint labeling",
- "uses native labeling",
-};
-
static inline int inode_doinit(struct inode *inode)
{
return inode_doinit_with_dentry(inode, NULL);
@@ -528,10 +516,6 @@ static int sb_finish_set_opts(struct super_block *sb)
}
}
- if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
- printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
- sb->s_id, sb->s_type->name);
-
sbsec->flags |= SE_SBINITIALIZED;
if (selinux_is_sblabel_mnt(sb))
sbsec->flags |= SBLABEL_MNT;
@@ -2049,8 +2033,9 @@ static inline u32 file_to_av(struct file *file)
static inline u32 open_file_to_av(struct file *file)
{
u32 av = file_to_av(file);
+ struct inode *inode = file_inode(file);
- if (selinux_policycap_openperm)
+ if (selinux_policycap_openperm && inode->i_sb->s_magic != SOCKFS_MAGIC)
av |= FILE__OPEN;
return av;
@@ -3047,6 +3032,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
{
const struct cred *cred = current_cred();
+ struct inode *inode = d_backing_inode(dentry);
unsigned int ia_valid = iattr->ia_valid;
__u32 av = FILE__WRITE;
@@ -3062,8 +3048,10 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
return dentry_has_perm(cred, dentry, FILE__SETATTR);
- if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE)
- && !(ia_valid & ATTR_FILE))
+ if (selinux_policycap_openperm &&
+ inode->i_sb->s_magic != SOCKFS_MAGIC &&
+ (ia_valid & ATTR_SIZE) &&
+ !(ia_valid & ATTR_FILE))
av |= FILE__OPEN;
return dentry_has_perm(cred, dentry, av);
@@ -4328,10 +4316,18 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
u32 sid, node_perm;
if (family == PF_INET) {
+ if (addrlen < sizeof(struct sockaddr_in)) {
+ err = -EINVAL;
+ goto out;
+ }
addr4 = (struct sockaddr_in *)address;
snum = ntohs(addr4->sin_port);
addrp = (char *)&addr4->sin_addr.s_addr;
} else {
+ if (addrlen < SIN6_LEN_RFC2133) {
+ err = -EINVAL;
+ goto out;
+ }
addr6 = (struct sockaddr_in6 *)address;
snum = ntohs(addr6->sin6_port);
addrp = (char *)&addr6->sin6_addr.s6_addr;
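
Note: the SELinux bind hook above and the earlier sctp_sockaddr_af() change apply the same rule: verify that addrlen covers the structure you are about to cast to (sizeof(struct sockaddr_in) for v4, SIN6_LEN_RFC2133 for v6) before reading any family-specific fields. A stripped-down userspace sketch of that check; it uses sizeof(struct sockaddr_in6) where the kernel uses the RFC 2133 length:

#include <arpa/inet.h>
#include <sys/socket.h>

/* Reject short sockaddr buffers before dereferencing family-specific fields. */
int extract_port(const struct sockaddr *sa, socklen_t addrlen, unsigned short *port)
{
	if (addrlen < sizeof(sa->sa_family))
		return -1;
	if (sa->sa_family == AF_INET) {
		if (addrlen < sizeof(struct sockaddr_in))
			return -1;
		*port = ntohs(((const struct sockaddr_in *)sa)->sin_port);
		return 0;
	}
	if (sa->sa_family == AF_INET6) {
		if (addrlen < sizeof(struct sockaddr_in6))
			return -1;
		*port = ntohs(((const struct sockaddr_in6 *)sa)->sin6_port);
		return 0;
	}
	return -1;
}
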
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 73275a92f2e2..d656b7c98394 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -155,7 +155,7 @@ static int selinux_set_mapping(struct policydb *pol,
}
k = 0;
- while (p_in->perms && p_in->perms[k]) {
+ while (p_in->perms[k]) {
/* An empty permission string skips ahead */
if (!*p_in->perms[k]) {
k++;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 3321348fd86b..cfb8f5896787 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -834,8 +834,25 @@ static int choose_rate(struct snd_pcm_substream *substream,
return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
}
-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
- bool trylock)
+/* parameter locking: returns immediately if tried during streaming */
+static int lock_params(struct snd_pcm_runtime *runtime)
+{
+ if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -ERESTARTSYS;
+ if (atomic_read(&runtime->oss.rw_ref)) {
+ mutex_unlock(&runtime->oss.params_lock);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static void unlock_params(struct snd_pcm_runtime *runtime)
+{
+ mutex_unlock(&runtime->oss.params_lock);
+}
+
+/* call with params_lock held */
+static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_hw_params *params, *sparams;
@@ -849,11 +866,8 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
struct snd_mask sformat_mask;
struct snd_mask mask;
- if (trylock) {
- if (!(mutex_trylock(&runtime->oss.params_lock)))
- return -EAGAIN;
- } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
- return -EINTR;
+ if (!runtime->oss.params)
+ return 0;
sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
params = kmalloc(sizeof(*params), GFP_KERNEL);
sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
@@ -1079,6 +1093,23 @@ failure:
kfree(sw_params);
kfree(params);
kfree(sparams);
+ return err;
+}
+
+/* this one takes the lock by itself */
+static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+ bool trylock)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int err;
+
+ if (trylock) {
+ if (!(mutex_trylock(&runtime->oss.params_lock)))
+ return -EAGAIN;
+ } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -ERESTARTSYS;
+
+ err = snd_pcm_oss_change_params_locked(substream);
mutex_unlock(&runtime->oss.params_lock);
return err;
}
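
Note: the OSS PCM changes above split parameter updates into a *_locked variant and guard every setter with lock_params(), which takes params_lock but bails out with -EBUSY while a read/write is in flight (tracked by the new rw_ref counter). A small pthreads sketch of that take-lock-then-check-busy idea; names are illustrative, not the ALSA API:

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

struct stream {
	pthread_mutex_t params_lock;
	atomic_int      rw_ref;      /* >0 while a read/write is in progress */
	int             rate;
};

/* Take the lock, but refuse to change parameters while I/O is running. */
int lock_params(struct stream *s)
{
	pthread_mutex_lock(&s->params_lock);
	if (atomic_load(&s->rw_ref)) {
		pthread_mutex_unlock(&s->params_lock);
		return -EBUSY;
	}
	return 0;
}

int set_rate(struct stream *s, int rate)
{
	int err = lock_params(s);

	if (err)
		return err;
	s->rate = rate;
	pthread_mutex_unlock(&s->params_lock);
	return 0;
}
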
@@ -1107,6 +1138,10 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
return 0;
}
+/* call with params_lock held */
+/* NOTE: this always call PREPARE unconditionally no matter whether
+ * runtime->oss.prepare is set or not
+ */
static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
{
int err;
@@ -1131,8 +1166,6 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime;
int err;
- if (substream == NULL)
- return 0;
runtime = substream->runtime;
if (runtime->oss.params) {
err = snd_pcm_oss_change_params(substream, false);
@@ -1140,6 +1173,29 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
return err;
}
if (runtime->oss.prepare) {
+ if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -ERESTARTSYS;
+ err = snd_pcm_oss_prepare(substream);
+ mutex_unlock(&runtime->oss.params_lock);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+/* call with params_lock held */
+static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime;
+ int err;
+
+ runtime = substream->runtime;
+ if (runtime->oss.params) {
+ err = snd_pcm_oss_change_params_locked(substream);
+ if (err < 0)
+ return err;
+ }
+ if (runtime->oss.prepare) {
err = snd_pcm_oss_prepare(substream);
if (err < 0)
return err;
@@ -1361,19 +1417,21 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const char __user *buf, size_t bytes)
{
size_t xfer = 0;
- ssize_t tmp;
+ ssize_t tmp = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
if (atomic_read(&substream->mmap_count))
return -ENXIO;
- if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
- return tmp;
+ atomic_inc(&runtime->oss.rw_ref);
while (bytes > 0) {
if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
tmp = -ERESTARTSYS;
break;
}
+ tmp = snd_pcm_oss_make_ready_locked(substream);
+ if (tmp < 0)
+ goto err;
if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
tmp = bytes;
if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1429,6 +1487,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
}
tmp = 0;
}
+ atomic_dec(&runtime->oss.rw_ref);
return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}
@@ -1468,19 +1527,21 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __user *buf, size_t bytes)
{
size_t xfer = 0;
- ssize_t tmp;
+ ssize_t tmp = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
if (atomic_read(&substream->mmap_count))
return -ENXIO;
- if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
- return tmp;
+ atomic_inc(&runtime->oss.rw_ref);
while (bytes > 0) {
if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
tmp = -ERESTARTSYS;
break;
}
+ tmp = snd_pcm_oss_make_ready_locked(substream);
+ if (tmp < 0)
+ goto err;
if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
if (runtime->oss.buffer_used == 0) {
tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1521,6 +1582,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
}
tmp = 0;
}
+ atomic_dec(&runtime->oss.rw_ref);
return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}
@@ -1536,10 +1598,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
continue;
runtime = substream->runtime;
snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+ mutex_lock(&runtime->oss.params_lock);
runtime->oss.prepare = 1;
runtime->oss.buffer_used = 0;
runtime->oss.prev_hw_ptr_period = 0;
runtime->oss.period_ptr = 0;
+ mutex_unlock(&runtime->oss.params_lock);
}
return 0;
}
@@ -1625,9 +1689,13 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
goto __direct;
if ((err = snd_pcm_oss_make_ready(substream)) < 0)
return err;
+ atomic_inc(&runtime->oss.rw_ref);
+ if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+ atomic_dec(&runtime->oss.rw_ref);
+ return -ERESTARTSYS;
+ }
format = snd_pcm_oss_format_from(runtime->oss.format);
width = snd_pcm_format_physical_width(format);
- mutex_lock(&runtime->oss.params_lock);
if (runtime->oss.buffer_used > 0) {
#ifdef OSS_DEBUG
pcm_dbg(substream->pcm, "sync: buffer_used\n");
@@ -1637,10 +1705,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
runtime->oss.buffer + runtime->oss.buffer_used,
size);
err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
- if (err < 0) {
- mutex_unlock(&runtime->oss.params_lock);
- return err;
- }
+ if (err < 0)
+ goto unlock;
} else if (runtime->oss.period_ptr > 0) {
#ifdef OSS_DEBUG
pcm_dbg(substream->pcm, "sync: period_ptr\n");
@@ -1650,10 +1716,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
runtime->oss.buffer,
size * 8 / width);
err = snd_pcm_oss_sync1(substream, size);
- if (err < 0) {
- mutex_unlock(&runtime->oss.params_lock);
- return err;
- }
+ if (err < 0)
+ goto unlock;
}
/*
* The ALSA's period might be a bit large than OSS one.
@@ -1684,7 +1748,11 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
snd_pcm_lib_writev(substream, buffers, size);
}
}
+unlock:
mutex_unlock(&runtime->oss.params_lock);
+ atomic_dec(&runtime->oss.rw_ref);
+ if (err < 0)
+ return err;
/*
* finish sync: drain the buffer
*/
@@ -1695,7 +1763,9 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
substream->f_flags = saved_f_flags;
if (err < 0)
return err;
+ mutex_lock(&runtime->oss.params_lock);
runtime->oss.prepare = 1;
+ mutex_unlock(&runtime->oss.params_lock);
}
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
@@ -1706,8 +1776,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
if (err < 0)
return err;
+ mutex_lock(&runtime->oss.params_lock);
runtime->oss.buffer_used = 0;
runtime->oss.prepare = 1;
+ mutex_unlock(&runtime->oss.params_lock);
}
return 0;
}
@@ -1719,6 +1791,8 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
for (idx = 1; idx >= 0; --idx) {
struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
struct snd_pcm_runtime *runtime;
+ int err;
+
if (substream == NULL)
continue;
runtime = substream->runtime;
@@ -1726,10 +1800,14 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
rate = 1000;
else if (rate > 192000)
rate = 192000;
+ err = lock_params(runtime);
+ if (err < 0)
+ return err;
if (runtime->oss.rate != rate) {
runtime->oss.params = 1;
runtime->oss.rate = rate;
}
+ unlock_params(runtime);
}
return snd_pcm_oss_get_rate(pcm_oss_file);
}
@@ -1754,13 +1832,19 @@ static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsig
for (idx = 1; idx >= 0; --idx) {
struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
struct snd_pcm_runtime *runtime;
+ int err;
+
if (substream == NULL)
continue;
runtime = substream->runtime;
+ err = lock_params(runtime);
+ if (err < 0)
+ return err;
if (runtime->oss.channels != channels) {
runtime->oss.params = 1;
runtime->oss.channels = channels;
}
+ unlock_params(runtime);
}
return snd_pcm_oss_get_channels(pcm_oss_file);
}
@@ -1814,10 +1898,9 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
return -ENOMEM;
_snd_pcm_hw_params_any(params);
err = snd_pcm_hw_refine(substream, params);
- format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
- kfree(params);
if (err < 0)
- return err;
+ goto error;
+ format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
for (fmt = 0; fmt < 32; ++fmt) {
if (snd_mask_test(&format_mask, fmt)) {
int f = snd_pcm_oss_format_to(fmt);
@@ -1825,12 +1908,16 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
formats |= f;
}
}
- return formats;
+
+ error:
+ kfree(params);
+ return err < 0 ? err : formats;
}
static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
{
int formats, idx;
+ int err;
if (format != AFMT_QUERY) {
formats = snd_pcm_oss_get_formats(pcm_oss_file);
@@ -1844,10 +1931,14 @@ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int for
if (substream == NULL)
continue;
runtime = substream->runtime;
+ err = lock_params(runtime);
+ if (err < 0)
+ return err;
if (runtime->oss.format != format) {
runtime->oss.params = 1;
runtime->oss.format = format;
}
+ unlock_params(runtime);
}
}
return snd_pcm_oss_get_format(pcm_oss_file);
@@ -1867,8 +1958,6 @@ static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int s
{
struct snd_pcm_runtime *runtime;
- if (substream == NULL)
- return 0;
runtime = substream->runtime;
if (subdivide == 0) {
subdivide = runtime->oss.subdivision;
@@ -1892,9 +1981,17 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
for (idx = 1; idx >= 0; --idx) {
struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+ struct snd_pcm_runtime *runtime;
+
if (substream == NULL)
continue;
- if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0)
+ runtime = substream->runtime;
+ err = lock_params(runtime);
+ if (err < 0)
+ return err;
+ err = snd_pcm_oss_set_subdivide1(substream, subdivide);
+ unlock_params(runtime);
+ if (err < 0)
return err;
}
return err;
@@ -1904,8 +2001,6 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
{
struct snd_pcm_runtime *runtime;
- if (substream == NULL)
- return 0;
runtime = substream->runtime;
if (runtime->oss.subdivision || runtime->oss.fragshift)
return -EINVAL;
@@ -1925,9 +2020,17 @@ static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsig
for (idx = 1; idx >= 0; --idx) {
struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+ struct snd_pcm_runtime *runtime;
+
if (substream == NULL)
continue;
- if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0)
+ runtime = substream->runtime;
+ err = lock_params(runtime);
+ if (err < 0)
+ return err;
+ err = snd_pcm_oss_set_fragment1(substream, val);
+ unlock_params(runtime);
+ if (err < 0)
return err;
}
return err;
@@ -2011,6 +2114,9 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
}
if (psubstream) {
runtime = psubstream->runtime;
+ cmd = 0;
+ if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -ERESTARTSYS;
if (trigger & PCM_ENABLE_OUTPUT) {
if (runtime->oss.trigger)
goto _skip1;
@@ -2028,13 +2134,19 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
cmd = SNDRV_PCM_IOCTL_DROP;
runtime->oss.prepare = 1;
}
- err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
- if (err < 0)
- return err;
- }
_skip1:
+ mutex_unlock(&runtime->oss.params_lock);
+ if (cmd) {
+ err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
+ if (err < 0)
+ return err;
+ }
+ }
if (csubstream) {
runtime = csubstream->runtime;
+ cmd = 0;
+ if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -ERESTARTSYS;
if (trigger & PCM_ENABLE_INPUT) {
if (runtime->oss.trigger)
goto _skip2;
@@ -2049,11 +2161,14 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
cmd = SNDRV_PCM_IOCTL_DROP;
runtime->oss.prepare = 1;
}
- err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
- if (err < 0)
- return err;
- }
_skip2:
+ mutex_unlock(&runtime->oss.params_lock);
+ if (cmd) {
+ err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
+ if (err < 0)
+ return err;
+ }
+ }
return 0;
}
@@ -2305,6 +2420,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
runtime->oss.maxfrags = 0;
runtime->oss.subdivision = 0;
substream->pcm_release = snd_pcm_oss_release_substream;
+ atomic_set(&runtime->oss.rw_ref, 0);
}
static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 074363b63cc4..6bda8f6c5f84 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -28,6 +28,7 @@
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/pcm.h>
+#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
@@ -1025,8 +1026,13 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
snd_free_pages((void*)runtime->control,
PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
kfree(runtime->hw_constraints.rules);
- kfree(runtime);
+ /* Avoid concurrent access to runtime via PCM timer interface */
+ if (substream->timer)
+ spin_lock_irq(&substream->timer->lock);
substream->runtime = NULL;
+ if (substream->timer)
+ spin_unlock_irq(&substream->timer->lock);
+ kfree(runtime);
put_pid(substream->pid);
substream->pid = NULL;
substream->pstr->substream_opened--;
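
Note: in snd_pcm_detach_substream() the runtime is now freed only after substream->runtime has been cleared under the PCM timer's lock, so a concurrent timer callback can no longer dereference memory that is about to be released. The same clear-under-lock-then-free ordering in a generic pthreads sketch:

#include <pthread.h>
#include <stdlib.h>

struct substream {
	pthread_mutex_t timer_lock;  /* lock the timer callback also takes */
	void           *runtime;
};

void detach_runtime(struct substream *s)
{
	void *rt;

	pthread_mutex_lock(&s->timer_lock);
	rt = s->runtime;
	s->runtime = NULL;           /* unpublish first, under the lock the reader uses */
	pthread_mutex_unlock(&s->timer_lock);
	free(rt);                    /* safe: no new reader can see the pointer any more */
}
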
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 1f64ab0c2a95..7ae080bae15c 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -426,6 +426,8 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
return -ENOTTY;
if (substream->stream != dir)
return -EINVAL;
+ if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
+ return -EBADFD;
if ((ch = substream->runtime->channels) > 128)
return -EINVAL;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 3d307bda86f9..8313482add35 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2729,6 +2729,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
sync_ptr.s.status.hw_ptr = status->hw_ptr;
sync_ptr.s.status.tstamp = status->tstamp;
sync_ptr.s.status.suspended_state = status->suspended_state;
+ sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
snd_pcm_stream_unlock_irq(substream);
if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
return -EFAULT;
@@ -3410,7 +3411,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
area,
substream->runtime->dma_area,
substream->runtime->dma_addr,
- area->vm_end - area->vm_start);
+ substream->runtime->dma_bytes);
#endif /* CONFIG_X86 */
/* mmap with fault handler */
area->vm_ops = &snd_pcm_vm_ops_data_fault;
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
index f69764d7cdd7..e30e30ba6e39 100644
--- a/sound/core/rawmidi_compat.c
+++ b/sound/core/rawmidi_compat.c
@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
struct snd_rawmidi_params params;
unsigned int val;
- if (rfile->output == NULL)
- return -EINVAL;
if (get_user(params.stream, &src->stream) ||
get_user(params.buffer_size, &src->buffer_size) ||
get_user(params.avail_min, &src->avail_min) ||
@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
params.no_active_sensing = val;
switch (params.stream) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
+ if (!rfile->output)
+ return -EINVAL;
return snd_rawmidi_output_params(rfile->output, &params);
case SNDRV_RAWMIDI_STREAM_INPUT:
+ if (!rfile->input)
+ return -EINVAL;
return snd_rawmidi_input_params(rfile->input, &params);
}
return -EINVAL;
@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
int err;
struct snd_rawmidi_status status;
- if (rfile->output == NULL)
- return -EINVAL;
if (get_user(status.stream, &src->stream))
return -EFAULT;
switch (status.stream) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
+ if (!rfile->output)
+ return -EINVAL;
err = snd_rawmidi_output_status(rfile->output, &status);
break;
case SNDRV_RAWMIDI_STREAM_INPUT:
+ if (!rfile->input)
+ return -EINVAL;
err = snd_rawmidi_input_status(rfile->input, &status);
break;
default:
@@ -112,16 +116,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
int err;
struct snd_rawmidi_status status;
- if (rfile->output == NULL)
- return -EINVAL;
if (get_user(status.stream, &src->stream))
return -EFAULT;
switch (status.stream) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
+ if (!rfile->output)
+ return -EINVAL;
err = snd_rawmidi_output_status(rfile->output, &status);
break;
case SNDRV_RAWMIDI_STREAM_INPUT:
+ if (!rfile->input)
+ return -EINVAL;
err = snd_rawmidi_input_status(rfile->input, &status);
break;
default:
diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
index c3908862bc8b..86ca584c27b2 100644
--- a/sound/core/seq/oss/seq_oss_event.c
+++ b/sound/core/seq/oss/seq_oss_event.c
@@ -26,6 +26,7 @@
#include <sound/seq_oss_legacy.h>
#include "seq_oss_readq.h"
#include "seq_oss_writeq.h"
+#include <linux/nospec.h>
/*
@@ -287,10 +288,10 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
{
struct seq_oss_synthinfo *info;
- if (!snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- info = &dp->synths[dev];
switch (info->arg.event_passing) {
case SNDRV_SEQ_OSS_PROCESS_EVENTS:
if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -298,6 +299,7 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
}
+ ch = array_index_nospec(ch, info->nr_voices);
if (note == 255 && info->ch[ch].note >= 0) {
/* volume control */
int type;
@@ -347,10 +349,10 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
{
struct seq_oss_synthinfo *info;
- if (!snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- info = &dp->synths[dev];
switch (info->arg.event_passing) {
case SNDRV_SEQ_OSS_PROCESS_EVENTS:
if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -358,6 +360,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
}
+ ch = array_index_nospec(ch, info->nr_voices);
if (info->ch[ch].note >= 0) {
note = info->ch[ch].note;
info->ch[ch].vel = 0;
@@ -381,7 +384,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
static int
set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ if (!snd_seq_oss_synth_info(dp, dev))
return -ENXIO;
ev->type = type;
@@ -399,7 +402,7 @@ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note,
static int
set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ if (!snd_seq_oss_synth_info(dp, dev))
return -ENXIO;
ev->type = type;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index b30b2139e3f0..9debd1b8fd28 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -29,6 +29,7 @@
#include "../seq_lock.h"
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
/*
@@ -315,6 +316,7 @@ get_mididev(struct seq_oss_devinfo *dp, int dev)
{
if (dev < 0 || dev >= dp->max_mididev)
return NULL;
+ dev = array_index_nospec(dev, dp->max_mididev);
return get_mdev(dev);
}
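
Note: get_mididev() and the synth lookups now pass the already range-checked index through array_index_nospec(), so a mispredicted bounds check cannot be used to speculatively index past the table (Spectre v1). A userspace approximation of the mask-based clamp, not the kernel macro itself; like the kernel's generic helper it relies on arithmetic right shift of a negative value:

#include <stddef.h>

/* Return index if index < size, else 0, without a data-dependent branch. */
size_t index_nospec(size_t index, size_t size)
{
	/* mask is all-ones when index < size, all-zeroes otherwise */
	size_t mask = (size_t)(~(long)(index | (size - 1 - index)) >> (sizeof(long) * 8 - 1));

	return index & mask;
}

/* caller pattern, as in get_mididev(): validate, then clamp before indexing
 *   if (dev < 0 || dev >= max) return NULL;
 *   dev = index_nospec(dev, max);
 */
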
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index cd0e0ebbfdb1..278ebb993122 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
/*
* constants
@@ -339,17 +340,13 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
dp->max_synthdev = 0;
}
-/*
- * check if the specified device is MIDI mapped device
- */
-static int
-is_midi_dev(struct seq_oss_devinfo *dp, int dev)
+static struct seq_oss_synthinfo *
+get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
{
if (dev < 0 || dev >= dp->max_synthdev)
- return 0;
- if (dp->synths[dev].is_midi)
- return 1;
- return 0;
+ return NULL;
+ dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
+ return &dp->synths[dev];
}
/*
@@ -359,14 +356,20 @@ static struct seq_oss_synth *
get_synthdev(struct seq_oss_devinfo *dp, int dev)
{
struct seq_oss_synth *rec;
- if (dev < 0 || dev >= dp->max_synthdev)
- return NULL;
- if (! dp->synths[dev].opened)
+ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
+
+ if (!info)
return NULL;
- if (dp->synths[dev].is_midi)
- return &midi_synth_dev;
- if ((rec = get_sdev(dev)) == NULL)
+ if (!info->opened)
return NULL;
+ if (info->is_midi) {
+ rec = &midi_synth_dev;
+ snd_use_lock_use(&rec->use_lock);
+ } else {
+ rec = get_sdev(dev);
+ if (!rec)
+ return NULL;
+ }
if (! rec->opened) {
snd_use_lock_free(&rec->use_lock);
return NULL;
@@ -402,10 +405,8 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
struct seq_oss_synth *rec;
struct seq_oss_synthinfo *info;
- if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev))
- return;
- info = &dp->synths[dev];
- if (! info->opened)
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info || !info->opened)
return;
if (info->sysex)
info->sysex->len = 0; /* reset sysex */
@@ -454,12 +455,14 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
const char __user *buf, int p, int c)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info;
int rc;
- if (dev < 0 || dev >= dp->max_synthdev)
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info)
return -ENXIO;
- if (is_midi_dev(dp, dev))
+ if (info->is_midi)
return 0;
if ((rec = get_synthdev(dp, dev)) == NULL)
return -ENXIO;
@@ -467,24 +470,25 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
if (rec->oper.load_patch == NULL)
rc = -ENXIO;
else
- rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c);
+ rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
snd_use_lock_free(&rec->use_lock);
return rc;
}
/*
- * check if the device is valid synth device
+ * check if the device is valid synth device and return the synth info
*/
-int
-snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev)
+struct seq_oss_synthinfo *
+snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
{
struct seq_oss_synth *rec;
+
rec = get_synthdev(dp, dev);
if (rec) {
snd_use_lock_free(&rec->use_lock);
- return 1;
+ return get_synthinfo_nospec(dp, dev);
}
- return 0;
+ return NULL;
}
@@ -499,16 +503,18 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
int i, send;
unsigned char *dest;
struct seq_oss_synth_sysex *sysex;
+ struct seq_oss_synthinfo *info;
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- sysex = dp->synths[dev].sysex;
+ sysex = info->sysex;
if (sysex == NULL) {
sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
if (sysex == NULL)
return -ENOMEM;
- dp->synths[dev].sysex = sysex;
+ info->sysex = sysex;
}
send = 0;
@@ -553,10 +559,12 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
int
snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
+
+ if (!info)
return -EINVAL;
- snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client,
- dp->synths[dev].arg.addr.port);
+ snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
+ info->arg.addr.port);
return 0;
}
@@ -568,16 +576,18 @@ int
snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info;
int rc;
- if (is_midi_dev(dp, dev))
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info || info->is_midi)
return -ENXIO;
if ((rec = get_synthdev(dp, dev)) == NULL)
return -ENXIO;
if (rec->oper.ioctl == NULL)
rc = -ENXIO;
else
- rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr);
+ rc = rec->oper.ioctl(&info->arg, cmd, addr);
snd_use_lock_free(&rec->use_lock);
return rc;
}
@@ -589,7 +599,10 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u
int
snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev))
+ struct seq_oss_synthinfo *info;
+
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info || info->is_midi)
return -ENXIO;
ev->type = SNDRV_SEQ_EVENT_OSS;
memcpy(ev->data.raw8.d, data, 8);
diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
index 74ac55f166b6..a63f9e22974d 100644
--- a/sound/core/seq/oss/seq_oss_synth.h
+++ b/sound/core/seq/oss/seq_oss_synth.h
@@ -37,7 +37,8 @@ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp);
void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
const char __user *buf, int p, int c);
-int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev);
+struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
+ int dev);
int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
struct snd_seq_event *ev);
int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 0b408617b2c9..ecd1c5fc8db8 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -255,12 +255,12 @@ static int seq_free_client1(struct snd_seq_client *client)
if (!client)
return 0;
- snd_seq_delete_all_ports(client);
- snd_seq_queue_client_leave(client->number);
spin_lock_irqsave(&clients_lock, flags);
clienttablock[client->number] = 1;
clienttab[client->number] = NULL;
spin_unlock_irqrestore(&clients_lock, flags);
+ snd_seq_delete_all_ports(client);
+ snd_seq_queue_client_leave(client->number);
snd_use_lock_sync(&client->use_lock);
snd_seq_queue_client_termination(client->number);
if (client->pool)
@@ -906,7 +906,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
struct snd_seq_event *event,
struct file *file, int blocking,
- int atomic, int hop)
+ int atomic, int hop,
+ struct mutex *mutexp)
{
struct snd_seq_event_cell *cell;
int err;
@@ -944,7 +945,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
return -ENXIO; /* queue is not allocated */
/* allocate an event cell */
- err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
+ err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
+ file, mutexp);
if (err < 0)
return err;
@@ -1013,12 +1015,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
return -ENXIO;
/* allocate the pool now if the pool is not allocated yet */
+ mutex_lock(&client->ioctl_mutex);
if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
- mutex_lock(&client->ioctl_mutex);
err = snd_seq_pool_init(client->pool);
- mutex_unlock(&client->ioctl_mutex);
if (err < 0)
- return -ENOMEM;
+ goto out;
}
/* only process whole events */
@@ -1069,7 +1070,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
/* ok, enqueue it */
err = snd_seq_client_enqueue_event(client, &event, file,
!(file->f_flags & O_NONBLOCK),
- 0, 0);
+ 0, 0, &client->ioctl_mutex);
if (err < 0)
break;
@@ -1080,6 +1081,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
written += len;
}
+ out:
+ mutex_unlock(&client->ioctl_mutex);
return written ? written : err;
}
@@ -1835,6 +1838,9 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
(! snd_seq_write_pool_allocated(client) ||
info->output_pool != client->pool->size)) {
if (snd_seq_write_pool_allocated(client)) {
+ /* is the pool in use? */
+ if (atomic_read(&client->pool->counter))
+ return -EBUSY;
/* remove all existing cells */
snd_seq_pool_mark_closing(client->pool);
snd_seq_queue_client_leave_cells(client->number);
@@ -2259,7 +2265,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
if (! cptr->accept_output)
result = -EPERM;
else /* send it */
- result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
+ result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
+ atomic, hop, NULL);
snd_seq_client_unlock(cptr);
return result;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 3490d21ab9e7..9acbed1ac982 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -123,7 +123,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
return -EINVAL;
snd_use_lock_use(&f->use_lock);
- err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
+ err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
if (err < 0) {
if ((err == -ENOMEM) || (err == -EAGAIN))
atomic_inc(&f->overflow);
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 5847c4475bf3..4c8cbcd89887 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -221,7 +221,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
*/
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
struct snd_seq_event_cell **cellp,
- int nonblock, struct file *file)
+ int nonblock, struct file *file,
+ struct mutex *mutexp)
{
struct snd_seq_event_cell *cell;
unsigned long flags;
@@ -245,7 +246,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&pool->output_sleep, &wait);
spin_unlock_irq(&pool->lock);
+ if (mutexp)
+ mutex_unlock(mutexp);
schedule();
+ if (mutexp)
+ mutex_lock(mutexp);
spin_lock_irq(&pool->lock);
remove_wait_queue(&pool->output_sleep, &wait);
/* interrupted? */
@@ -288,7 +293,7 @@ __error:
*/
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
struct snd_seq_event_cell **cellp, int nonblock,
- struct file *file)
+ struct file *file, struct mutex *mutexp)
{
int ncells, err;
unsigned int extlen;
@@ -305,7 +310,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
if (ncells >= pool->total_elements)
return -ENOMEM;
- err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
+ err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
if (err < 0)
return err;
@@ -331,7 +336,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
int size = sizeof(struct snd_seq_event);
if (len < size)
size = len;
- err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
+ err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
+ mutexp);
if (err < 0)
goto __error;
if (cell->event.data.ext.ptr == NULL)
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 32f959c17786..3abe306c394a 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -66,7 +66,8 @@ struct snd_seq_pool {
void snd_seq_cell_free(struct snd_seq_event_cell *cell);
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
- struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
+ struct snd_seq_event_cell **cellp, int nonblock,
+ struct file *file, struct mutex *mutexp);
/* return number of unused (free) cells */
static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
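[Editor's sketch, not part of the patch] The client.c and seq_memory.c hunks above thread an optional mutex down into the blocking cell allocator so that the caller's ioctl_mutex is released across the sleep rather than held while waiting for pool space. A minimal userspace sketch of the same locking pattern, using pthreads and hypothetical names in place of the kernel's wait queues:

#include <pthread.h>

/* Hypothetical pool; 'outer' stands in for the caller's ioctl_mutex that
 * must not stay held while we sleep waiting for a free cell. */
struct pool {
    pthread_mutex_t lock;     /* inner lock, like pool->lock */
    pthread_cond_t space;     /* signalled when a cell is freed */
    int free_cells;
};

static int pool_alloc_blocking(struct pool *p, pthread_mutex_t *outer)
{
    pthread_mutex_lock(&p->lock);
    while (p->free_cells == 0) {
        if (outer)
            pthread_mutex_unlock(outer);  /* as the patch does around schedule() */
        pthread_cond_wait(&p->space, &p->lock);
        if (outer) {
            /* re-take the locks in outer -> inner order */
            pthread_mutex_unlock(&p->lock);
            pthread_mutex_lock(outer);
            pthread_mutex_lock(&p->lock);
        }
    }
    p->free_cells--;
    pthread_mutex_unlock(&p->lock);
    return 0;
}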
diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c
index bc1c8488fc2a..2bc6759e4adc 100644
--- a/sound/core/seq/seq_prioq.c
+++ b/sound/core/seq/seq_prioq.c
@@ -87,7 +87,7 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo)
if (f->cells > 0) {
/* drain prioQ */
while (f->cells > 0)
- snd_seq_cell_free(snd_seq_prioq_cell_out(f));
+ snd_seq_cell_free(snd_seq_prioq_cell_out(f, NULL));
}
kfree(f);
@@ -214,8 +214,18 @@ int snd_seq_prioq_cell_in(struct snd_seq_prioq * f,
return 0;
}
+/* return 1 if the current time >= event timestamp */
+static int event_is_ready(struct snd_seq_event *ev, void *current_time)
+{
+ if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_TICK)
+ return snd_seq_compare_tick_time(current_time, &ev->time.tick);
+ else
+ return snd_seq_compare_real_time(current_time, &ev->time.time);
+}
+
/* dequeue cell from prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
+struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
+ void *current_time)
{
struct snd_seq_event_cell *cell;
unsigned long flags;
@@ -227,6 +237,8 @@ struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
spin_lock_irqsave(&f->lock, flags);
cell = f->head;
+ if (cell && current_time && !event_is_ready(&cell->event, current_time))
+ cell = NULL;
if (cell) {
f->head = cell->next;
@@ -252,18 +264,6 @@ int snd_seq_prioq_avail(struct snd_seq_prioq * f)
return f->cells;
}
-
-/* peek at cell at the head of the prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq * f)
-{
- if (f == NULL) {
- pr_debug("ALSA: seq: snd_seq_prioq_cell_in() called with NULL prioq\n");
- return NULL;
- }
- return f->head;
-}
-
-
static inline int prioq_match(struct snd_seq_event_cell *cell,
int client, int timestamp)
{
diff --git a/sound/core/seq/seq_prioq.h b/sound/core/seq/seq_prioq.h
index d38bb78d9345..2c315ca10fc4 100644
--- a/sound/core/seq/seq_prioq.h
+++ b/sound/core/seq/seq_prioq.h
@@ -44,14 +44,12 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo);
int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell);
/* dequeue cell from prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f);
+struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
+ void *current_time);
/* return number of events available in prioq */
int snd_seq_prioq_avail(struct snd_seq_prioq *f);
-/* peek at cell at the head of the prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq *f);
-
/* client left queue */
void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp);
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 79e0c5604ef8..1a6dc4ff44a6 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -277,30 +277,20 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
__again:
/* Process tick queue... */
- while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) {
- if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick,
- &cell->event.time.tick)) {
- cell = snd_seq_prioq_cell_out(q->tickq);
- if (cell)
- snd_seq_dispatch_event(cell, atomic, hop);
- } else {
- /* event remains in the queue */
+ for (;;) {
+ cell = snd_seq_prioq_cell_out(q->tickq,
+ &q->timer->tick.cur_tick);
+ if (!cell)
break;
- }
+ snd_seq_dispatch_event(cell, atomic, hop);
}
-
/* Process time queue... */
- while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) {
- if (snd_seq_compare_real_time(&q->timer->cur_time,
- &cell->event.time.time)) {
- cell = snd_seq_prioq_cell_out(q->timeq);
- if (cell)
- snd_seq_dispatch_event(cell, atomic, hop);
- } else {
- /* event remains in the queue */
+ for (;;) {
+ cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
+ if (!cell)
break;
- }
+ snd_seq_dispatch_event(cell, atomic, hop);
}
/* free lock */
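[Editor's sketch, not part of the patch] The seq_prioq.c and seq_queue.c hunks above drop the separate peek helper and make snd_seq_prioq_cell_out() check the head's timestamp under the queue lock, so the readiness test and the dequeue happen atomically. A rough userspace analogue of that dequeue (hypothetical types, a pthread mutex in place of the spinlock):

#include <pthread.h>
#include <stddef.h>

struct cell { unsigned long ts; struct cell *next; };

struct prioq {
    pthread_mutex_t lock;
    struct cell *head;
};

/* Dequeue the head only if it is due; the check and the removal are done
 * under the same lock, as in the patched snd_seq_prioq_cell_out(). */
static struct cell *prioq_cell_out(struct prioq *q, const unsigned long *now)
{
    struct cell *c;

    pthread_mutex_lock(&q->lock);
    c = q->head;
    if (c && now && c->ts > *now)   /* head not due yet: keep it queued */
        c = NULL;
    if (c)
        q->head = c->next;
    pthread_mutex_unlock(&q->lock);
    return c;
}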
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 200764948ed1..8bdc4c961f04 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -174,12 +174,12 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
}
return;
}
+ spin_lock_irqsave(&substream->runtime->lock, flags);
if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
- return;
+ goto out;
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
}
- spin_lock_irqsave(&substream->runtime->lock, flags);
while (1) {
count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
if (count <= 0)
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index cbd20cb8ca11..847f70348d4d 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -192,6 +192,11 @@ static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
dpcm->timer.expires = 0;
}
+static inline void loopback_timer_stop_sync(struct loopback_pcm *dpcm)
+{
+ del_timer_sync(&dpcm->timer);
+}
+
#define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
#define CABLE_VALID_CAPTURE (1 << SNDRV_PCM_STREAM_CAPTURE)
#define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE)
@@ -291,6 +296,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
cable->pause |= stream;
loopback_timer_stop(dpcm);
spin_unlock(&cable->lock);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ loopback_active_notify(dpcm);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
@@ -299,6 +306,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
cable->pause &= ~stream;
loopback_timer_start(dpcm);
spin_unlock(&cable->lock);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ loopback_active_notify(dpcm);
break;
default:
return -EINVAL;
@@ -326,6 +335,8 @@ static int loopback_prepare(struct snd_pcm_substream *substream)
struct loopback_cable *cable = dpcm->cable;
int bps, salign;
+ loopback_timer_stop_sync(dpcm);
+
salign = (snd_pcm_format_width(runtime->format) *
runtime->channels) / 8;
bps = salign * runtime->rate;
@@ -659,7 +670,9 @@ static void free_cable(struct snd_pcm_substream *substream)
return;
if (cable->streams[!substream->stream]) {
/* other stream is still alive */
+ spin_lock_irq(&cable->lock);
cable->streams[substream->stream] = NULL;
+ spin_unlock_irq(&cable->lock);
} else {
/* free the cable */
loopback->cables[substream->number][dev] = NULL;
@@ -699,7 +712,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
loopback->cables[substream->number][dev] = cable;
}
dpcm->cable = cable;
- cable->streams[substream->stream] = dpcm;
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
@@ -731,6 +743,11 @@ static int loopback_open(struct snd_pcm_substream *substream)
runtime->hw = loopback_pcm_hardware;
else
runtime->hw = cable->hw;
+
+ spin_lock_irq(&cable->lock);
+ cable->streams[substream->stream] = dpcm;
+ spin_unlock_irq(&cable->lock);
+
unlock:
if (err < 0) {
free_cable(substream);
@@ -745,7 +762,7 @@ static int loopback_close(struct snd_pcm_substream *substream)
struct loopback *loopback = substream->private_data;
struct loopback_pcm *dpcm = substream->runtime->private_data;
- loopback_timer_stop(dpcm);
+ loopback_timer_stop_sync(dpcm);
mutex_lock(&loopback->cable_lock);
free_cable(substream);
mutex_unlock(&loopback->cable_lock);
@@ -815,9 +832,11 @@ static int loopback_rate_shift_get(struct snd_kcontrol *kcontrol,
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].rate_shift;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
@@ -849,9 +868,11 @@ static int loopback_notify_get(struct snd_kcontrol *kcontrol,
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].notify;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
@@ -863,12 +884,14 @@ static int loopback_notify_put(struct snd_kcontrol *kcontrol,
int change = 0;
val = ucontrol->value.integer.value[0] ? 1 : 0;
+ mutex_lock(&loopback->cable_lock);
if (val != loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].notify) {
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].notify = val;
change = 1;
}
+ mutex_unlock(&loopback->cable_lock);
return change;
}
@@ -876,13 +899,18 @@ static int loopback_active_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
- struct loopback_cable *cable = loopback->cables
- [kcontrol->id.subdevice][kcontrol->id.device ^ 1];
+ struct loopback_cable *cable;
+
unsigned int val = 0;
- if (cable != NULL)
- val = (cable->running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
- 1 : 0;
+ mutex_lock(&loopback->cable_lock);
+ cable = loopback->cables[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
+ if (cable != NULL) {
+ unsigned int running = cable->running ^ cable->pause;
+
+ val = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ? 1 : 0;
+ }
+ mutex_unlock(&loopback->cable_lock);
ucontrol->value.integer.value[0] = val;
return 0;
}
@@ -925,9 +953,11 @@ static int loopback_rate_get(struct snd_kcontrol *kcontrol,
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].rate;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
@@ -947,9 +977,11 @@ static int loopback_channels_get(struct snd_kcontrol *kcontrol,
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].channels;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index ddcc1a325a61..42920a243328 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/nospec.h>
#include <sound/opl3.h>
#include <sound/asound_fm.h>
@@ -448,7 +449,7 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
{
unsigned short reg_side;
unsigned char op_offset;
- unsigned char voice_offset;
+ unsigned char voice_offset, voice_op;
unsigned short opl3_reg;
unsigned char reg_val;
@@ -473,7 +474,9 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
voice_offset = voice->voice - MAX_OPL2_VOICES;
}
/* Get register offset of operator */
- op_offset = snd_opl3_regmap[voice_offset][voice->op];
+ voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
+ voice_op = array_index_nospec(voice->op, 4);
+ op_offset = snd_opl3_regmap[voice_offset][voice_op];
reg_val = 0x00;
/* Set amplitude modulation (tremolo) effect */
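[Editor's sketch, not part of the patch] This opl3 hunk, like the asihpi, hda_hwdep, hdspm and rme9652 hunks further down, clamps a user-controlled index with array_index_nospec() before using it as an array subscript, so a mispredicted bounds check cannot leak out-of-bounds data speculatively. A hypothetical userspace stand-in for the idea (not the kernel implementation, which uses carefully ordered asm):

#include <stddef.h>
#include <stdio.h>

/* Force any index that is not in [0, size) to 0 using plain arithmetic
 * rather than a branch the CPU could speculate past. */
static size_t index_nospec(size_t index, size_t size)
{
    size_t mask = (size_t)0 - (size_t)(index < size); /* all ones when in range */
    return index & mask;
}

int main(void)
{
    static const short regmap[4] = { 11, 22, 33, 44 };
    size_t op = 2;                 /* user-controlled in the real ioctl */

    if (op < 4)                    /* the architectural bounds check stays */
        printf("%d\n", regmap[index_nospec(op, 4)]);
    return 0;
}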
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 9741757436be..d0ad61f563e2 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -471,8 +471,9 @@ static int handle_in_packet(struct amdtp_stream *s,
* This module supports 'Two-quadlet CIP header with SYT field'.
* For convenience, also check FMT field is AM824 or not.
*/
- if (((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
- ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) {
+ if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
+ ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
+ (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
dev_info_ratelimited(&s->unit->device,
"Invalid CIP header for AMDTP: %08X:%08X\n",
cip_header[0], cip_header[1]);
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index f7c054bc9d92..8136bd20c8b1 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -29,6 +29,8 @@
* @CIP_JUMBO_PAYLOAD: Only for in-stream. The number of data blocks in a
* packet is larger than IEC 61883-6 defines. The current implementation
* allows up to 5 times what IEC 61883-6 defines.
+ * @CIP_HEADER_WITHOUT_EOH: Only for in-stream. CIP Header doesn't include
+ * valid EOH.
*/
enum cip_flags {
CIP_NONBLOCKING = 0x00,
@@ -39,6 +41,7 @@ enum cip_flags {
CIP_SKIP_DBC_ZERO_CHECK = 0x10,
CIP_EMPTY_HAS_WRONG_DBC = 0x20,
CIP_JUMBO_PAYLOAD = 0x40,
+ CIP_HEADER_WITHOUT_EOH = 0x80,
};
/**
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index ec4db3a514fc..257cfbfadb4a 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -425,7 +425,7 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice)
err = init_stream(dice, AMDTP_IN_STREAM, i);
if (err < 0) {
for (; i >= 0; i--)
- destroy_stream(dice, AMDTP_OUT_STREAM, i);
+ destroy_stream(dice, AMDTP_IN_STREAM, i);
goto end;
}
}
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 25e9f77275c4..0d3d36fb1540 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -14,7 +14,7 @@ MODULE_LICENSE("GPL v2");
#define OUI_WEISS 0x001c6a
#define OUI_LOUD 0x000ff2
#define OUI_FOCUSRITE 0x00130e
-#define OUI_TCELECTRONIC 0x001486
+#define OUI_TCELECTRONIC 0x000166
#define DICE_CATEGORY_ID 0x04
#define WEISS_CATEGORY_ID 0x00
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c
index b3cffd01a19f..a4688545339c 100644
--- a/sound/firewire/digi00x/amdtp-dot.c
+++ b/sound/firewire/digi00x/amdtp-dot.c
@@ -28,6 +28,9 @@
*/
#define MAX_MIDI_RX_BLOCKS 8
+/* 3 = MAX(DOT_MIDI_IN_PORTS, DOT_MIDI_OUT_PORTS) + 1. */
+#define MAX_MIDI_PORTS 3
+
/*
* The double-oh-three algorithm was discovered by Robin Gareus and Damien
* Zammit in 2012, with reverse-engineering for Digi 003 Rack.
@@ -42,10 +45,8 @@ struct amdtp_dot {
unsigned int pcm_channels;
struct dot_state state;
- unsigned int midi_ports;
- /* 2 = MAX(DOT_MIDI_IN_PORTS, DOT_MIDI_OUT_PORTS) */
- struct snd_rawmidi_substream *midi[2];
- int midi_fifo_used[2];
+ struct snd_rawmidi_substream *midi[MAX_MIDI_PORTS];
+ int midi_fifo_used[MAX_MIDI_PORTS];
int midi_fifo_limit;
void (*transfer_samples)(struct amdtp_stream *s,
@@ -124,8 +125,8 @@ int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate,
return -EBUSY;
/*
- * A first data channel is for MIDI conformant data channel, the rest is
- * Multi Bit Linear Audio data channel.
+ * The first data channel carries MIDI messages; the rest are Multi Bit
+ * Linear Audio data channels.
*/
err = amdtp_stream_set_parameters(s, rate, pcm_channels + 1);
if (err < 0)
@@ -135,11 +136,6 @@ int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate,
p->pcm_channels = pcm_channels;
- if (s->direction == AMDTP_IN_STREAM)
- p->midi_ports = DOT_MIDI_IN_PORTS;
- else
- p->midi_ports = DOT_MIDI_OUT_PORTS;
-
/*
* We do not know the actual MIDI FIFO size of most devices. Just
* assume two bytes, i.e., one byte can be received over the bus while
@@ -281,13 +277,25 @@ static void write_midi_messages(struct amdtp_stream *s, __be32 *buffer,
b = (u8 *)&buffer[0];
len = 0;
- if (port < p->midi_ports &&
+ if (port < MAX_MIDI_PORTS &&
midi_ratelimit_per_packet(s, port) &&
p->midi[port] != NULL)
len = snd_rawmidi_transmit(p->midi[port], b + 1, 2);
if (len > 0) {
- b[3] = (0x10 << port) | len;
+ /*
+ * Upper 4 bits of LSB represent port number.
+ * - 0000b: physical MIDI port 1.
+ * - 0010b: physical MIDI port 2.
+ * - 1110b: console MIDI port.
+ */
+ if (port == 2)
+ b[3] = 0xe0;
+ else if (port == 1)
+ b[3] = 0x20;
+ else
+ b[3] = 0x00;
+ b[3] |= len;
midi_use_bytes(s, port, len);
} else {
b[1] = 0;
@@ -309,11 +317,22 @@ static void read_midi_messages(struct amdtp_stream *s, __be32 *buffer,
for (f = 0; f < data_blocks; f++) {
b = (u8 *)&buffer[0];
- port = b[3] >> 4;
- len = b[3] & 0x0f;
- if (port < p->midi_ports && p->midi[port] && len > 0)
- snd_rawmidi_receive(p->midi[port], b + 1, len);
+ len = b[3] & 0x0f;
+ if (len > 0) {
+ /*
+ * Upper 4 bits of LSB represent port number.
+ * - 0000b: physical MIDI port 1. Use port 0.
+ * - 1110b: console MIDI port. Use port 2.
+ */
+ if (b[3] >> 4 > 0)
+ port = 2;
+ else
+ port = 0;
+
+ if (port < MAX_MIDI_PORTS && p->midi[port])
+ snd_rawmidi_receive(p->midi[port], b + 1, len);
+ }
buffer += s->data_block_quadlets;
}
@@ -364,7 +383,7 @@ void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port,
{
struct amdtp_dot *p = s->protocol;
- if (port < p->midi_ports)
+ if (port < MAX_MIDI_PORTS)
ACCESS_ONCE(p->midi[port]) = midi;
}
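[Editor's sketch, not part of the patch] The write_midi_messages() and read_midi_messages() hunks above encode the destination port in the upper nibble of the last byte and the payload length in the lower nibble. A tiny standalone encoder for the mapping described in the new comments (hypothetical helper, not part of the driver):

#include <stdio.h>

/* 0x0 = physical MIDI port 1, 0x2 = physical MIDI port 2, 0xe = console
 * MIDI port; the low nibble carries the number of payload bytes. */
static unsigned char dot_midi_label(unsigned int port, unsigned int len)
{
    unsigned char b;

    if (port == 2)
        b = 0xe0;
    else if (port == 1)
        b = 0x20;
    else
        b = 0x00;
    return b | (len & 0x0f);
}

int main(void)
{
    printf("0x%02x\n", dot_midi_label(2, 2));   /* console port, 2 bytes: 0xe2 */
    return 0;
}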
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index cc4776c6ded3..1f5e1d23f31a 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -13,7 +13,8 @@ MODULE_AUTHOR("Takashi Sakamoto <o-takashi@sakamocchi.jp>");
MODULE_LICENSE("GPL v2");
#define VENDOR_DIGIDESIGN 0x00a07e
-#define MODEL_DIGI00X 0x000002
+#define MODEL_CONSOLE 0x000001
+#define MODEL_RACK 0x000002
static int name_card(struct snd_dg00x *dg00x)
{
@@ -129,6 +130,8 @@ static int snd_dg00x_probe(struct fw_unit *unit,
spin_lock_init(&dg00x->lock);
init_waitqueue_head(&dg00x->hwdep_wait);
+ dg00x->is_console = entry->model_id == MODEL_CONSOLE;
+
/* Allocate and register this sound card later. */
INIT_DEFERRABLE_WORK(&dg00x->dwork, do_registration);
snd_fw_schedule_registration(unit, &dg00x->dwork);
@@ -183,7 +186,13 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = {
.match_flags = IEEE1394_MATCH_VENDOR_ID |
IEEE1394_MATCH_MODEL_ID,
.vendor_id = VENDOR_DIGIDESIGN,
- .model_id = MODEL_DIGI00X,
+ .model_id = MODEL_CONSOLE,
+ },
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID,
+ .vendor_id = VENDOR_DIGIDESIGN,
+ .model_id = MODEL_RACK,
},
{}
};
diff --git a/sound/firewire/digi00x/digi00x.h b/sound/firewire/digi00x/digi00x.h
index 2cd465c0caae..43bcb0ce69c0 100644
--- a/sound/firewire/digi00x/digi00x.h
+++ b/sound/firewire/digi00x/digi00x.h
@@ -60,6 +60,7 @@ struct snd_dg00x {
/* For asynchronous MIDI controls. */
struct snd_rawmidi_substream *in_control;
struct snd_fw_async_midi_port out_control;
+ bool is_console;
};
#define DG00X_ADDR_BASE 0xffffe0000000ull
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 7eb617175fde..a31a70dccecf 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -23,6 +23,7 @@
#include "hpi_internal.h"
#include "hpimsginit.h"
+#include <linux/nospec.h>
/* The actual message size for each object type */
static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
@@ -39,10 +40,12 @@ static void hpi_init_message(struct hpi_message *phm, u16 object,
{
u16 size;
- if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+ object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
size = msg_size[object];
- else
+ } else {
size = sizeof(*phm);
+ }
memset(phm, 0, size);
phm->size = size;
@@ -66,10 +69,12 @@ void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
{
u16 size;
- if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+ object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
size = res_size[object];
- else
+ } else {
size = sizeof(*phr);
+ }
memset(phr, 0, sizeof(*phr));
phr->size = size;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 7e3aa50b21f9..3ef9af53ef49 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -33,6 +33,7 @@
#include <linux/stringify.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/nospec.h>
#ifdef MODULE_FIRMWARE
MODULE_FIRMWARE("asihpi/dsp5000.bin");
@@ -182,7 +183,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct hpi_adapter *pa = NULL;
if (hm->h.adapter_index < ARRAY_SIZE(adapters))
- pa = &adapters[hm->h.adapter_index];
+ pa = &adapters[array_index_nospec(hm->h.adapter_index,
+ ARRAY_SIZE(adapters))];
if (!pa || !pa->adapter || !pa->adapter->type) {
hpi_init_response(&hr->r0, hm->h.object,
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 57df06e76968..cc009a4a3d1d 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
@@ -51,7 +52,16 @@ static int get_wcap_ioctl(struct hda_codec *codec,
if (get_user(verb, &arg->verb))
return -EFAULT;
- res = get_wcaps(codec, verb >> 24);
+ /* open-code get_wcaps(verb>>24) with nospec */
+ verb >>= 24;
+ if (verb < codec->core.start_nid ||
+ verb >= codec->core.start_nid + codec->core.num_nodes) {
+ res = 0;
+ } else {
+ verb -= codec->core.start_nid;
+ verb = array_index_nospec(verb, codec->core.num_nodes);
+ res = codec->wcaps[verb];
+ }
if (put_user(res, &arg->res))
return -EFAULT;
return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 293f3f213776..7d3f88d90eec 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -185,6 +185,10 @@ module_param(power_save, xint, 0644);
MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
"(in second, 0 = disable).");
+static bool pm_blacklist = true;
+module_param(pm_blacklist, bool, 0644);
+MODULE_PARM_DESC(pm_blacklist, "Enable power-management blacklist");
+
/* reset the HD-audio controller in power save mode.
* this may give more power-saving, but will take longer time to
* wake up.
@@ -369,8 +373,10 @@ enum {
#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198)
#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
- IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
+ IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) || \
+ IS_GLK(pci)
static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
@@ -1508,7 +1514,8 @@ static void azx_check_snoop_available(struct azx *chip)
*/
u8 val;
pci_read_config_byte(chip->pci, 0x42, &val);
- if (!(val & 0x80) && chip->pci->revision == 0x30)
+ if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
+ chip->pci->revision == 0x20))
snoop = false;
}
@@ -2042,6 +2049,24 @@ out_free:
return err;
}
+#ifdef CONFIG_PM
+/* On some boards setting power_save to a non-zero value leads to clicking /
+ * popping sounds whenever we enter/leave power-saving mode. Ideally we would
+ * figure out how to avoid these sounds, but that is not always feasible.
+ * So we keep a list of devices where we disable power saving, as it is known
+ * to cause problems on these devices.
+ */
+static struct snd_pci_quirk power_save_blacklist[] = {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+ SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+ {}
+};
+#endif /* CONFIG_PM */
+
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
[AZX_DRIVER_NVIDIA] = 8,
@@ -2054,6 +2079,7 @@ static int azx_probe_continue(struct azx *chip)
struct hdac_bus *bus = azx_bus(chip);
struct pci_dev *pci = chip->pci;
int dev = chip->dev_index;
+ int val;
int err;
hda->probe_continued = 1;
@@ -2129,7 +2155,21 @@ static int azx_probe_continue(struct azx *chip)
chip->running = 1;
azx_add_card_list(chip);
- snd_hda_set_power_save(&chip->bus, power_save * 1000);
+
+ val = power_save;
+#ifdef CONFIG_PM
+ if (pm_blacklist) {
+ const struct snd_pci_quirk *q;
+
+ q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+ if (q && val) {
+ dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+ q->subvendor, q->subdevice);
+ val = 0;
+ }
+ }
+#endif /* CONFIG_PM */
+ snd_hda_set_power_save(&chip->bus, val * 1000);
if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
pm_runtime_put_autosuspend(&pci->dev);
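[Editor's sketch, not part of the patch] The azx_probe_continue() hunk above looks the device up in power_save_blacklist by PCI subsystem IDs and forces the power_save timeout to 0 for listed boards. A simplified userspace rendering of that lookup (hypothetical struct and function names):

#include <stdio.h>

struct ssid_quirk { unsigned short subvendor, subdevice; const char *name; };

static const struct ssid_quirk power_save_blacklist[] = {
    { 0x1849, 0x0c0c, "Asrock B85M-ITX" },
    { 0x1043, 0x8733, "Asus Prime X370-Pro" },
    { 0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen" },
    { 0, 0, NULL }
};

/* Return the timeout that should actually be programmed. */
static int effective_power_save(unsigned short sv, unsigned short sd, int requested)
{
    const struct ssid_quirk *q;

    for (q = power_save_blacklist; q->name; q++)
        if (q->subvendor == sv && q->subdevice == sd)
            return 0;                  /* blacklisted: disable power saving */
    return requested;
}

int main(void)
{
    printf("%d\n", effective_power_save(0x1849, 0x0c0c, 10));  /* prints 0 */
    return 0;
}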
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 2c3065c1f3fb..b3851b991120 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -849,6 +849,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 89c166b97e81..7ece1ab57eef 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0233:
+ case 0x10ec0235:
case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
@@ -3261,8 +3262,12 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled)
pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid);
pinval &= ~AC_PINCTL_VREFEN;
pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80;
- if (spec->mute_led_nid)
+ if (spec->mute_led_nid) {
+ /* temporarily power up/down for setting VREF */
+ snd_hda_power_up_pm(codec);
snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
+ snd_hda_power_down_pm(codec);
+ }
}
/* Make sure the led works even in runtime suspend */
@@ -4480,13 +4485,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ } else if (action == HDA_FIXUP_ACT_INIT) {
/* Enable DOCK device */
snd_hda_codec_write(codec, 0x17, 0,
AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
/* Enable DOCK device */
snd_hda_codec_write(codec, 0x19, 0,
AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
- snd_hda_apply_pincfgs(codec, pincfgs);
}
}
@@ -4759,6 +4765,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec,
}
}
+/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */
+static void alc295_fixup_disable_dac3(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ hda_nid_t conn[2] = { 0x02, 0x03 };
+ snd_hda_override_conn_list(codec, 0x17, 2, conn);
+ }
+}
+
/* Hook to update amp GPIO4 for automute */
static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
struct hda_jack_callback *jack)
@@ -4908,6 +4924,7 @@ enum {
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
ALC255_FIXUP_DELL_SPK_NOISE,
ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC295_FIXUP_DISABLE_DAC3,
ALC280_FIXUP_HP_HEADSET_MIC,
ALC221_FIXUP_HP_FRONT_MIC,
ALC292_FIXUP_TPT460,
@@ -5600,6 +5617,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
},
+ [ALC295_FIXUP_DISABLE_DAC3] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc295_fixup_disable_dac3,
+ },
[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -5663,6 +5684,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
+ SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5784,9 +5806,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2249, "Thinkpad", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
@@ -6336,6 +6360,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0298:
spec->codec_variant = ALC269_TYPE_ALC298;
break;
+ case 0x10ec0235:
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
break;
@@ -6761,6 +6786,7 @@ enum {
ALC668_FIXUP_DELL_DISABLE_AAMIX,
ALC668_FIXUP_DELL_XPS13,
ALC662_FIXUP_ASUS_Nx50,
+ ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
ALC668_FIXUP_ASUS_Nx51,
ALC891_FIXUP_HEADSET_MODE,
ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
@@ -7012,14 +7038,21 @@ static const struct hda_fixup alc662_fixups[] = {
.chained = true,
.chain_id = ALC662_FIXUP_BASS_1A
},
+ [ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mode_alc668,
+ .chain_id = ALC662_FIXUP_BASS_CHMAP
+ },
[ALC668_FIXUP_ASUS_Nx51] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
- {0x1a, 0x90170151}, /* bass speaker */
+ { 0x19, 0x03a1913d }, /* use as headphone mic, without its own jack detect */
+ { 0x1a, 0x90170151 }, /* bass speaker */
+ { 0x1b, 0x03a1113c }, /* use as headset mic, without its own jack detect */
{}
},
.chained = true,
- .chain_id = ALC662_FIXUP_BASS_CHMAP,
+ .chain_id = ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
},
[ALC891_FIXUP_HEADSET_MODE] = {
.type = HDA_FIXUP_FUNC,
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 14bbf55c1ef9..9899ef4c7efa 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -137,6 +137,7 @@
#include <linux/pci.h>
#include <linux/math64.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -5692,40 +5693,43 @@ static int snd_hdspm_channel_info(struct snd_pcm_substream *substream,
struct snd_pcm_channel_info *info)
{
struct hdspm *hdspm = snd_pcm_substream_chip(substream);
+ unsigned int channel = info->channel;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) {
+ if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: output channel out of range (%d)\n",
- info->channel);
+ channel);
return -EINVAL;
}
- if (hdspm->channel_map_out[info->channel] < 0) {
+ channel = array_index_nospec(channel, hdspm->max_channels_out);
+ if (hdspm->channel_map_out[channel] < 0) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: output channel %d mapped out\n",
- info->channel);
+ channel);
return -EINVAL;
}
- info->offset = hdspm->channel_map_out[info->channel] *
+ info->offset = hdspm->channel_map_out[channel] *
HDSPM_CHANNEL_BUFFER_BYTES;
} else {
- if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) {
+ if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: input channel out of range (%d)\n",
- info->channel);
+ channel);
return -EINVAL;
}
- if (hdspm->channel_map_in[info->channel] < 0) {
+ channel = array_index_nospec(channel, hdspm->max_channels_in);
+ if (hdspm->channel_map_in[channel] < 0) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: input channel %d mapped out\n",
- info->channel);
+ channel);
return -EINVAL;
}
- info->offset = hdspm->channel_map_in[info->channel] *
+ info->offset = hdspm->channel_map_in[channel] *
HDSPM_CHANNEL_BUFFER_BYTES;
}
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 55172c689991..a76b1f147660 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -2036,9 +2037,10 @@ static int snd_rme9652_channel_info(struct snd_pcm_substream *substream,
if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
return -EINVAL;
- if ((chn = rme9652->channel_map[info->channel]) < 0) {
+ chn = rme9652->channel_map[array_index_nospec(info->channel,
+ RME9652_NCHANNELS)];
+ if (chn < 0)
return -EINVAL;
- }
info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
info->first = 0;
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index f9f2737c4ad2..a4871c4adcb0 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -882,6 +882,7 @@ static int nau8825_adc_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_POST_PMU:
+ msleep(125);
regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC);
break;
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index f5d34153e21f..f0c9e2562474 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -1736,6 +1736,7 @@ static const struct regmap_config rt5651_regmap = {
.num_reg_defaults = ARRAY_SIZE(rt5651_reg),
.ranges = rt5651_ranges,
.num_ranges = ARRAY_SIZE(rt5651_ranges),
+ .use_single_rw = true,
};
#if defined(CONFIG_OF)
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index abc802a5a479..65ac4518ad06 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -5035,6 +5035,12 @@ static const struct i2c_device_id rt5677_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
+static const struct of_device_id rt5677_of_match[] = {
+ { .compatible = "realtek,rt5677", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rt5677_of_match);
+
static const struct acpi_gpio_params plug_det_gpio = { RT5677_GPIO_PLUG_DET, 0, false };
static const struct acpi_gpio_params mic_present_gpio = { RT5677_GPIO_MIC_PRESENT_L, 0, false };
static const struct acpi_gpio_params headphone_enable_gpio = { RT5677_GPIO_HP_AMP_SHDN_L, 0, false };
@@ -5294,6 +5300,7 @@ static int rt5677_i2c_remove(struct i2c_client *i2c)
static struct i2c_driver rt5677_i2c_driver = {
.driver = {
.name = "rt5677",
+ .of_match_table = rt5677_of_match,
},
.probe = rt5677_i2c_probe,
.remove = rt5677_i2c_remove,
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 1589325855bc..3dba5550a665 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -774,15 +774,26 @@ static int sgtl5000_pcm_hw_params(struct snd_pcm_substream *substream,
static int sgtl5000_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct sgtl5000_priv *sgtl = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
case SND_SOC_BIAS_STANDBY:
+ regcache_cache_only(sgtl->regmap, false);
+ ret = regcache_sync(sgtl->regmap);
+ if (ret) {
+ regcache_cache_only(sgtl->regmap, true);
+ return ret;
+ }
+
snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
SGTL5000_REFTOP_POWERUP,
SGTL5000_REFTOP_POWERUP);
break;
case SND_SOC_BIAS_OFF:
+ regcache_cache_only(sgtl->regmap, true);
snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
SGTL5000_REFTOP_POWERUP, 0);
break;
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 993bde29ca1b..7693e63078b1 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -54,10 +54,17 @@ struct ssm2602_priv {
* using 2 wire for device control, so we cache them instead.
* There is no point in caching the reset register
*/
-static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = {
- 0x0097, 0x0097, 0x0079, 0x0079,
- 0x000a, 0x0008, 0x009f, 0x000a,
- 0x0000, 0x0000
+static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
+ { .reg = 0x00, .def = 0x0097 },
+ { .reg = 0x01, .def = 0x0097 },
+ { .reg = 0x02, .def = 0x0079 },
+ { .reg = 0x03, .def = 0x0079 },
+ { .reg = 0x04, .def = 0x000a },
+ { .reg = 0x05, .def = 0x0008 },
+ { .reg = 0x06, .def = 0x009f },
+ { .reg = 0x07, .def = 0x000a },
+ { .reg = 0x08, .def = 0x0000 },
+ { .reg = 0x09, .def = 0x0000 }
};
@@ -620,8 +627,8 @@ const struct regmap_config ssm2602_regmap_config = {
.volatile_reg = ssm2602_register_volatile,
.cache_type = REGCACHE_RBTREE,
- .reg_defaults_raw = ssm2602_reg,
- .num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg),
+ .reg_defaults = ssm2602_reg,
+ .num_reg_defaults = ARRAY_SIZE(ssm2602_reg),
};
EXPORT_SYMBOL_GPL(ssm2602_regmap_config);
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 38bfd46f4ad8..3ef174531344 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -145,6 +145,13 @@ static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio,
psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
+ /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
+ if (ratio <= 256) {
+ pm = ratio;
+ fp = 1;
+ goto out;
+ }
+
/* Set the max fluctuation -- 0.1% of the max divisor */
savesub = (psr ? 1 : 8) * 256 * maxfp / 1000;
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index dd88c2cb6470..48804e4ab530 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -201,7 +201,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
if (ret < 0)
return ret;
- ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX);
+ ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
if (ret < 0)
return ret;
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 0bfa68862460..cd16b431d1bd 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -420,7 +420,21 @@ static const struct dmi_system_id byt_table[] = {
.callback = byt_thinkpad10_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "20C3001VHH"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 10"),
+ },
+ },
+ {
+ .callback = byt_thinkpad10_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Tablet B"),
+ },
+ },
+ {
+ .callback = byt_thinkpad10_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"),
},
},
{ }
diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index 4ccc80e5e8cc..c798f8d4ae43 100644
--- a/sound/soc/intel/atom/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -221,7 +221,7 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx,
sst_free_block(sst_drv_ctx, block);
out:
test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
- return 0;
+ return ret;
}
/*
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 90525614c20a..b1eb696f33b6 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -111,6 +111,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
@@ -121,6 +122,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
{"IN1N", NULL, "Headset Mic"},
{"DMIC L1", NULL, "Int Mic"},
{"DMIC R1", NULL, "Int Mic"},
+ {"IN2P", NULL, "Int Analog Mic"},
+ {"IN2N", NULL, "Int Analog Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
{"Ext Spk", NULL, "SPOL"},
@@ -134,6 +137,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
{"Headphone", NULL, "Platform Clock"},
{"Headset Mic", NULL, "Platform Clock"},
{"Int Mic", NULL, "Platform Clock"},
+ {"Int Analog Mic", NULL, "Platform Clock"},
+ {"Int Analog Mic", NULL, "micbias1"},
+ {"Int Analog Mic", NULL, "micbias2"},
{"Ext Spk", NULL, "Platform Clock"},
};
@@ -162,6 +168,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
SOC_DAPM_PIN_SWITCH("Ext Spk"),
};
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 805b7f2173f3..78472c908ae9 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -331,7 +331,11 @@ int skl_resume_dsp(struct skl *skl)
if (skl->skl_sst->is_first_boot == true)
return 0;
+ /* disable dynamic clock gating during fw and lib download */
+ ctx->enable_miscbdcge(ctx->dev, false);
+
ret = skl_dsp_wake(ctx->dsp);
+ ctx->enable_miscbdcge(ctx->dev, true);
if (ret < 0)
return ret;
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 58c728662600..2fd213cd9a40 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -1191,7 +1191,11 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
return -EIO;
}
+ /* disable dynamic clock gating during fw and lib download */
+ skl->skl_sst->enable_miscbdcge(platform->dev, false);
+
ret = ops->init_fw(platform->dev, skl->skl_sst);
+ skl->skl_sst->enable_miscbdcge(platform->dev, true);
if (ret < 0) {
dev_err(platform->dev, "Failed to boot first fw: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 06fa5e85dd0e..3e526fbd267e 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -457,7 +457,7 @@ static int probe_codec(struct hdac_ext_bus *ebus, int addr)
struct hdac_bus *bus = ebus_to_hbus(ebus);
unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
- unsigned int res;
+ unsigned int res = -1;
mutex_lock(&bus->cmd_mutex);
snd_hdac_bus_send_cmd(bus, cmd);
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index b6615affe571..fde974d52bb2 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -67,7 +67,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97,
/* polling the AC_R_FINISH */
while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH)
- && timeout--)
+ && --timeout)
mdelay(1);
if (!timeout) {
@@ -121,7 +121,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
/* polling the AC_W_FINISH */
while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH)
- && timeout--)
+ && --timeout)
mdelay(1);
if (!timeout)
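[Editor's sketch, not part of the patch] The nuc900-ac97.c hunks above switch the polling loops from timeout-- to --timeout: with post-decrement the loop exits with timeout == -1 once the budget is spent, so the following !timeout check never fires; with pre-decrement it exits at 0 and the error path is taken. A small standalone illustration:

#include <stdbool.h>
#include <stdio.h>

static bool op_finished(void) { return false; }   /* device never answers */

int main(void)
{
    int timeout = 3;

    /* Pre-decrement, as in the fix: when the budget is exhausted the loop
     * leaves timeout == 0 and the message below is printed. The old
     * post-decrement form left timeout == -1 and skipped it. */
    while (!op_finished() && --timeout)
        ;   /* mdelay(1) in the driver */

    if (!timeout)
        printf("AC97 operation timed out\n");
    return 0;
}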
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index 7d92a24b7cfa..98835e9d1d7d 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -82,6 +82,9 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
[9] = 0x2,
};
+ if (unlikely(!src))
+ return -EIO;
+
data = path[rsnd_mod_id(src)] |
cmd_case[rsnd_mod_id(src)] << 16;
}
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index a9a43acce30e..17e305b71fd9 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -233,6 +233,15 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) {
/*
+ * SSIWSR.CONT will be set here, but SSICR.CKDV = 000 is not
+ * allowed together with it (SSIWSR.WS_MODE with
+ * SSICR.CKDV = 000 is not allowed either).
+ * Skip this case. See SSICR.CKDV.
+ */
+ if (j == 0)
+ continue;
+
+ /*
* this driver is assuming that
* system word is 32bit x chan
* see rsnd_ssi_init()
@@ -543,6 +552,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
u32 *buf = (u32 *)(runtime->dma_area +
rsnd_dai_pointer_offset(io, 0));
+ int shift = 0;
+
+ switch (runtime->sample_bits) {
+ case 32:
+ shift = 8;
+ break;
+ }
/*
* 8/16/32 bit data can be accessed via the TDR/RDR register
@@ -550,9 +566,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
* see rsnd_ssi_init()
*/
if (rsnd_io_is_play(io))
- rsnd_mod_write(mod, SSITDR, *buf);
+ rsnd_mod_write(mod, SSITDR, (*buf) << shift);
else
- *buf = rsnd_mod_read(mod, SSIRDR);
+ *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
}
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
index d0fb2f205bd9..4f4ebe90f1a8 100644
--- a/sound/usb/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
}
usb_fill_int_urb(urb, line6->usbdev,
- usb_sndbulkpipe(line6->usbdev,
+ usb_sndintpipe(line6->usbdev,
line6->properties->ep_ctrl_w),
transfer_buffer, length, midi_sent, line6,
line6->interval);
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 9038b2e7df73..eaa03acd4686 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -353,8 +353,11 @@ static struct usbmix_name_map bose_companion5_map[] = {
/*
* Dell usb dock with ALC4020 codec had a firmware problem where it got
* screwed up when zero volume is passed; just skip it as a workaround
+ *
+ * Also the extension unit gives an access error, so skip it as well.
*/
static const struct usbmix_name_map dell_alc4020_map[] = {
+ { 4, NULL }, /* extension unit */
{ 16, NULL },
{ 19, NULL },
{ 0 }
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 8a59d4782a0f..69bf5cf1e91e 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3277,4 +3277,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
}
},
+{
+ /*
+ * Bower's & Wilkins PX headphones only support the 48 kHz sample rate
+ * even though it advertises more. The capture interface doesn't work
+ * even on windows.
+ */
+ USB_DEVICE(0x19b5, 0x0021),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ /* Capture */
+ {
+ .ifnum = 1,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ /* Playback */
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 2,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_FILL_MAX |
+ UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x03,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
+ }
+ }
+ },
+ }
+ }
+},
+
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 1cd7f8b0bf77..45655b9108e8 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1175,6 +1175,7 @@ static bool is_teac_dsd_dac(unsigned int id)
switch (id) {
case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
+ case USB_ID(0x0644, 0x804a): /* TEAC UD-301 */
return true;
}
return false;
diff --git a/tools/lib/str_error_r.c b/tools/lib/str_error_r.c
index 503ae072244c..9ab2d0ad22d5 100644
--- a/tools/lib/str_error_r.c
+++ b/tools/lib/str_error_r.c
@@ -21,6 +21,6 @@ char *str_error_r(int errnum, char *buf, size_t buflen)
{
int err = strerror_r(errnum, buf, buflen);
if (err)
- snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, %p, %zd)=%d", errnum, buf, buflen, err);
+ snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, [buf], %zd)=%d", errnum, buflen, err);
return buf;
}
diff --git a/tools/lib/subcmd/pager.c b/tools/lib/subcmd/pager.c
index 6518bea926d6..68af60fdf0b9 100644
--- a/tools/lib/subcmd/pager.c
+++ b/tools/lib/subcmd/pager.c
@@ -29,10 +29,13 @@ static void pager_preexec(void)
* have real input
*/
fd_set in;
+ fd_set exception;
FD_ZERO(&in);
+ FD_ZERO(&exception);
FD_SET(0, &in);
- select(1, &in, NULL, &in, NULL);
+ FD_SET(0, &exception);
+ select(1, &in, NULL, &exception, NULL);
setenv("LESS", "FRSX", 0);
}
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index f87996b0cb29..9a250c71840e 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -442,9 +442,9 @@ static int perf_del_probe_events(struct strfilter *filter)
}
if (ret == -ENOENT && ret2 == -ENOENT)
- pr_debug("\"%s\" does not hit any event.\n", str);
- /* Note that this is silently ignored */
- ret = 0;
+ pr_warning("\"%s\" does not hit any event.\n", str);
+ else
+ ret = 0;
error:
if (kfd >= 0)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 688dea7cb08f..68861e81f06c 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -146,6 +146,7 @@ static aggr_get_id_t aggr_get_id;
static bool append_file;
static const char *output_name;
static int output_fd;
+static int print_free_counters_hint;
struct perf_stat {
bool record;
@@ -310,8 +311,12 @@ static int read_counter(struct perf_evsel *counter)
struct perf_counts_values *count;
count = perf_counts(counter->counts, cpu, thread);
- if (perf_evsel__read(counter, cpu, thread, count))
+ if (perf_evsel__read(counter, cpu, thread, count)) {
+ counter->counts->scaled = -1;
+ perf_counts(counter->counts, cpu, thread)->ena = 0;
+ perf_counts(counter->counts, cpu, thread)->run = 0;
return -1;
+ }
if (STAT_RECORD) {
if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
@@ -336,12 +341,14 @@ static int read_counter(struct perf_evsel *counter)
static void read_counters(void)
{
struct perf_evsel *counter;
+ int ret;
evlist__for_each_entry(evsel_list, counter) {
- if (read_counter(counter))
+ ret = read_counter(counter);
+ if (ret)
pr_debug("failed to read counter %s\n", counter->name);
- if (perf_stat_process_counter(&stat_config, counter))
+ if (ret == 0 && perf_stat_process_counter(&stat_config, counter))
pr_warning("failed to process counter %s\n", counter->name);
}
}
@@ -869,7 +876,7 @@ static void print_metric_csv(void *ctx,
char buf[64], *vals, *ends;
if (unit == NULL || fmt == NULL) {
- fprintf(out, "%s%s%s%s", csv_sep, csv_sep, csv_sep, csv_sep);
+ fprintf(out, "%s%s", csv_sep, csv_sep);
return;
}
snprintf(buf, sizeof(buf), fmt, val);
@@ -1109,6 +1116,9 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
csv_sep);
+ if (counter->supported)
+ print_free_counters_hint = 1;
+
fprintf(stat_config.output, "%-*s%s",
csv_output ? 0 : unit_width,
counter->unit, csv_sep);
@@ -1477,6 +1487,13 @@ static void print_footer(void)
avg_stats(&walltime_nsecs_stats));
}
fprintf(output, "\n\n");
+
+ if (print_free_counters_hint)
+ fprintf(output,
+"Some events weren't counted. Try disabling the NMI watchdog:\n"
+" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
+" perf stat ...\n"
+" echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}
static void print_counters(struct timespec *ts, int argc, const char **argv)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 21f8a81797a0..0fd8bfb77f65 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -679,6 +679,10 @@ static struct syscall_fmt {
{ .name = "mlockall", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "mmap", .hexret = true,
+/* The standard mmap maps to old_mmap on s390x */
+#if defined(__s390x__)
+ .alias = "old_mmap",
+#endif
.arg_scnprintf = { [0] = SCA_HEX, /* addr */
[2] = SCA_MMAP_PROT, /* prot */
[3] = SCA_MMAP_FLAGS, /* flags */ }, },
@@ -822,12 +826,21 @@ struct syscall {
void **arg_parm;
};
-static size_t fprintf_duration(unsigned long t, FILE *fp)
+/*
+ * We need this 'calculated' boolean because in some cases we really
+ * don't know what the duration of a syscall is, for instance, when we start
+ * a session and some threads are waiting for a syscall to finish, say 'poll',
+ * in which case all we can do is print "( ? )" for the duration and for the
+ * start timestamp.
+ */
+static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
double duration = (double)t / NSEC_PER_MSEC;
size_t printed = fprintf(fp, "(");
- if (duration >= 1.0)
+ if (!calculated)
+ printed += fprintf(fp, " ? ");
+ else if (duration >= 1.0)
printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
else if (duration >= 0.01)
printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
@@ -1030,13 +1043,27 @@ static bool trace__filter_duration(struct trace *trace, double t)
return t < (trace->duration_filter * NSEC_PER_MSEC);
}
-static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
+static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
return fprintf(fp, "%10.3f ", ts);
}
+/*
+ * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
+ * using ttrace->entry_time for a thread that receives a sys_exit without
+ * first having received a sys_enter ("poll" issued before tracing session
+ * starts, lost sys_enter exit due to ring buffer overflow).
+ */
+static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
+{
+ if (tstamp > 0)
+ return __trace__fprintf_tstamp(trace, tstamp, fp);
+
+ return fprintf(fp, " ? ");
+}
+
static bool done = false;
static bool interrupted = false;
@@ -1047,10 +1074,10 @@ static void sig_handler(int sig)
}
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
- u64 duration, u64 tstamp, FILE *fp)
+ u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
- printed += fprintf_duration(duration, fp);
+ printed += fprintf_duration(duration, duration_calculated, fp);
if (trace->multiple_threads) {
if (trace->show_comm)
@@ -1452,7 +1479,7 @@ static int trace__printf_interrupted_entry(struct trace *trace, struct perf_samp
duration = sample->time - ttrace->entry_time;
- printed = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output);
+ printed = trace__fprintf_entry_head(trace, trace->current, duration, true, ttrace->entry_time, trace->output);
printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
ttrace->entry_pending = false;
@@ -1499,7 +1526,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (sc->is_exit) {
if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
- trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output);
+ trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
}
} else {
@@ -1547,6 +1574,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
{
long ret;
u64 duration = 0;
+ bool duration_calculated = false;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
@@ -1577,6 +1605,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
duration = sample->time - ttrace->entry_time;
if (trace__filter_duration(trace, duration))
goto out;
+ duration_calculated = true;
} else if (trace->duration_filter)
goto out;
@@ -1592,7 +1621,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (trace->summary_only)
goto out;
- trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output);
+ trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
if (ttrace->entry_pending) {
fprintf(trace->output, "%-70s", ttrace->entry_str);
@@ -1855,7 +1884,7 @@ static int trace__pgfault(struct trace *trace,
thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
sample->ip, &al);
- trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
+ trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
fprintf(trace->output, "%sfault [",
evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
diff --git a/tools/perf/tests/kmod-path.c b/tools/perf/tests/kmod-path.c
index 76f41f249944..6cd9e5107f77 100644
--- a/tools/perf/tests/kmod-path.c
+++ b/tools/perf/tests/kmod-path.c
@@ -61,6 +61,7 @@ int test__kmod_path__parse(int subtest __maybe_unused)
M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_KERNEL, true);
M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_USER, false);
+#ifdef HAVE_ZLIB_SUPPORT
/* path alloc_name alloc_ext kmod comp name ext */
T("/xxxx/xxxx/x.ko.gz", true , true , true, true, "[x]", "gz");
T("/xxxx/xxxx/x.ko.gz", false , true , true, true, NULL , "gz");
@@ -96,6 +97,7 @@ int test__kmod_path__parse(int subtest __maybe_unused)
M("x.ko.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
M("x.ko.gz", PERF_RECORD_MISC_KERNEL, true);
M("x.ko.gz", PERF_RECORD_MISC_USER, false);
+#endif
/* path alloc_name alloc_ext kmod comp name ext */
T("[test_module]", true , true , true, false, "[test_module]", NULL);
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 430d039d0079..a38227eb5450 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1250,6 +1250,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
{
char linkname[PATH_MAX];
char *build_id_filename;
+ char *build_id_path = NULL;
if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
!dso__is_kcore(dso))
@@ -1265,8 +1266,14 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
goto fallback;
}
+ build_id_path = strdup(filename);
+ if (!build_id_path)
+ return -1;
+
+ dirname(build_id_path);
+
if (dso__is_kcore(dso) ||
- readlink(filename, linkname, sizeof(linkname)) < 0 ||
+ readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
strstr(linkname, DSO__NAME_KALLSYMS) ||
access(filename, R_OK)) {
fallback:
@@ -1278,6 +1285,7 @@ fallback:
__symbol__join_symfs(filename, filename_size, dso->long_name);
}
+ free(build_id_path);
return 0;
}
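The annotate.c change strdup()s the filename and readlink()s its dirname() because the symlink to follow now sits on the build-id cache directory rather than on the file itself; the copy is needed because POSIX dirname() may modify its argument, and it is freed on both paths. A hedged sketch of that dirname-then-readlink pattern (readlink_parent is an illustrative helper, not a perf function):

#include <libgen.h>     /* dirname() */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Resolve the symlink on the parent directory of 'path'.  dirname() may
 * modify its argument, so work on a private copy and free it afterwards.
 * Returns 0 on success with the link target in 'buf', -1 on error. */
static int readlink_parent(const char *path, char *buf, size_t bufsz)
{
        char *copy = strdup(path);
        ssize_t len;

        if (!copy)
                return -1;

        len = readlink(dirname(copy), buf, bufsz - 1);
        free(copy);
        if (len < 0)
                return -1;
        buf[len] = '\0';
        return 0;
}

int main(int argc, char **argv)
{
        char target[PATH_MAX];

        if (argc > 1 && !readlink_parent(argv[1], target, sizeof(target)))
                printf("parent of %s links to %s\n", argv[1], target);
        return 0;
}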
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index e528c40739cc..993ef2762508 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -182,13 +182,17 @@ char *build_id_cache__origname(const char *sbuild_id)
char buf[PATH_MAX];
char *ret = NULL, *p;
size_t offs = 5; /* == strlen("../..") */
+ ssize_t len;
linkname = build_id_cache__linkname(sbuild_id, NULL, 0);
if (!linkname)
return NULL;
- if (readlink(linkname, buf, PATH_MAX) < 0)
+ len = readlink(linkname, buf, sizeof(buf) - 1);
+ if (len <= 0)
goto out;
+ buf[len] = '\0';
+
/* The link should be "../..<origpath>/<sbuild_id>" */
p = strrchr(buf, '/'); /* Cut off the "/<sbuild_id>" */
if (p && (p > buf + offs)) {
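The build-id.c hunk fixes the classic readlink() pitfall: the call neither NUL-terminates the result nor reserves room for a terminator, so reading up to PATH_MAX bytes into a PATH_MAX buffer leaves no space to add one. A small sketch of the safe idiom (readlinkz is a name invented for this example):

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

/* readlink() does not NUL-terminate and may silently truncate, so always
 * pass sizeof(buf) - 1 and terminate the result yourself, as the
 * build-id.c fix above does. */
static ssize_t readlinkz(const char *path, char *buf, size_t bufsz)
{
        ssize_t len = readlink(path, buf, bufsz - 1);

        if (len <= 0)
                return -1;
        buf[len] = '\0';
        return len;
}

int main(void)
{
        char buf[PATH_MAX];

        if (readlinkz("/proc/self/exe", buf, sizeof(buf)) > 0)
                printf("running binary: %s\n", buf);
        return 0;
}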
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 8ab0d7da956b..663192395780 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -255,8 +255,8 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
if (machine__is_default_guest(machine))
return 0;
- snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
- machine->root_dir, pid);
+ snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
+ machine->root_dir, pid, pid);
fp = fopen(filename, "r");
if (fp == NULL) {
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 8bc271141d9d..bce80f866dd0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1221,7 +1221,7 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
if (FD(evsel, cpu, thread) < 0)
return -EINVAL;
- if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
+ if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) <= 0)
return -errno;
return 0;
@@ -1239,7 +1239,7 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
return -ENOMEM;
- if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
+ if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
return -errno;
perf_evsel__compute_deltas(evsel, cpu, thread, &count);
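The two readn() hunks change `< 0` to `<= 0` because a return of 0 (EOF or short read) still means the expected counter record was not read, yet the old check let it pass as success. A sketch of a read-exactly-n helper under that convention (read_exact is illustrative, not perf's readn):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Read exactly 'n' bytes.  Returns n on success, 0 on EOF before any data,
 * -1 on error or on EOF in the middle of a record.  Callers that need a
 * full record must treat anything other than n (including 0) as failure,
 * which is what the evsel.c fix enforces. */
static ssize_t read_exact(int fd, void *buf, size_t n)
{
        size_t done = 0;

        while (done < n) {
                ssize_t r = read(fd, (char *)buf + done, n - done);

                if (r < 0) {
                        if (errno == EINTR)
                                continue;
                        return -1;
                }
                if (r == 0)             /* EOF: short read */
                        return done ? -1 : 0;
                done += r;
        }
        return done;
}

int main(void)
{
        char hdr[8];

        if (read_exact(0, hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr))
                fprintf(stderr, "short read on stdin\n");
        return 0;
}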
@@ -2400,11 +2400,17 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
int err, char *msg, size_t size)
{
char sbuf[STRERR_BUFSIZE];
+ int printed = 0;
switch (err) {
case EPERM:
case EACCES:
- return scnprintf(msg, size,
+ if (err == EPERM)
+ printed = scnprintf(msg, size,
+ "No permission to enable %s event.\n\n",
+ perf_evsel__name(evsel));
+
+ return scnprintf(msg + printed, size - printed,
"You may not have permission to collect %sstats.\n\n"
"Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
"which controls use of the performance events system by\n"
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 28bdb48357f0..ab36aa5585b4 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1454,8 +1454,16 @@ static int __event_process_build_id(struct build_id_event *bev,
dso__set_build_id(dso, &bev->build_id);
- if (!is_kernel_module(filename, cpumode))
- dso->kernel = dso_type;
+ if (dso_type != DSO_TYPE_USER) {
+ struct kmod_path m = { .name = NULL, };
+
+ if (!kmod_path__parse_name(&m, filename) && m.kmod)
+ dso__set_short_name(dso, strdup(m.name), true);
+ else
+ dso->kernel = dso_type;
+
+ free(m.name);
+ }
build_id__sprintf(dso->build_id, sizeof(dso->build_id),
sbuild_id);
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 7e27207d0f45..cac39532c057 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1300,6 +1300,7 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
intel_pt_clear_tx_flags(decoder);
decoder->have_tma = false;
decoder->cbr = 0;
+ decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
decoder->overflow = true;
return -EOVERFLOW;
@@ -1522,6 +1523,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
case INTEL_PT_PSBEND:
intel_pt_log("ERROR: Missing TIP after FUP\n");
decoder->pkt_state = INTEL_PT_STATE_ERR3;
+ decoder->pkt_step = 0;
return -ENOENT;
case INTEL_PT_OVF:
@@ -2182,14 +2184,6 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
return &decoder->state;
}
-static bool intel_pt_at_psb(unsigned char *buf, size_t len)
-{
- if (len < INTEL_PT_PSB_LEN)
- return false;
- return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR,
- INTEL_PT_PSB_LEN);
-}
-
/**
* intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
* @buf: pointer to buffer pointer
@@ -2278,6 +2272,7 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
* @buf: buffer
* @len: size of buffer
* @tsc: TSC value returned
+ * @rem: returns remaining size when TSC is found
*
* Find a TSC packet in @buf and return the TSC value. This function assumes
* that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
@@ -2285,7 +2280,8 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
*
* Return: %true if TSC is found, false otherwise.
*/
-static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
+static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
+ size_t *rem)
{
struct intel_pt_pkt packet;
int ret;
@@ -2296,6 +2292,7 @@ static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
return false;
if (packet.type == INTEL_PT_TSC) {
*tsc = packet.payload;
+ *rem = len;
return true;
}
if (packet.type == INTEL_PT_PSBEND)
@@ -2346,6 +2343,8 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
* @len_a: size of first buffer
* @buf_b: second buffer
* @len_b: size of second buffer
+ * @consecutive: returns true if there is data in buf_b that is consecutive
+ * to buf_a
*
* If the trace contains TSC we can look at the last TSC of @buf_a and the
* first TSC of @buf_b in order to determine if the buffers overlap, and then
@@ -2358,33 +2357,41 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
size_t len_a,
unsigned char *buf_b,
- size_t len_b)
+ size_t len_b, bool *consecutive)
{
uint64_t tsc_a, tsc_b;
unsigned char *p;
- size_t len;
+ size_t len, rem_a, rem_b;
p = intel_pt_last_psb(buf_a, len_a);
if (!p)
return buf_b; /* No PSB in buf_a => no overlap */
len = len_a - (p - buf_a);
- if (!intel_pt_next_tsc(p, len, &tsc_a)) {
+ if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
/* The last PSB+ in buf_a is incomplete, so go back one more */
len_a -= len;
p = intel_pt_last_psb(buf_a, len_a);
if (!p)
return buf_b; /* No full PSB+ => assume no overlap */
len = len_a - (p - buf_a);
- if (!intel_pt_next_tsc(p, len, &tsc_a))
+ if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
return buf_b; /* No TSC in buf_a => assume no overlap */
}
while (1) {
/* Ignore PSB+ with no TSC */
- if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) &&
- intel_pt_tsc_cmp(tsc_a, tsc_b) < 0)
- return buf_b; /* tsc_a < tsc_b => no overlap */
+ if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
+ int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
+
+ /* Same TSC, so buffers are consecutive */
+ if (!cmp && rem_b >= rem_a) {
+ *consecutive = true;
+ return buf_b + len_b - (rem_b - rem_a);
+ }
+ if (cmp < 0)
+ return buf_b; /* tsc_a < tsc_b => no overlap */
+ }
if (!intel_pt_step_psb(&buf_b, &len_b))
return buf_b + len_b; /* No PSB in buf_b => no data */
@@ -2398,6 +2405,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
* @buf_b: second buffer
* @len_b: size of second buffer
* @have_tsc: can use TSC packets to detect overlap
+ * @consecutive: returns true if there is data in buf_b that is consecutive
+ * to buf_a
*
* When trace samples or snapshots are recorded there is the possibility that
* the data overlaps. Note that, for the purposes of decoding, data is only
@@ -2408,7 +2417,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
*/
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b,
- bool have_tsc)
+ bool have_tsc, bool *consecutive)
{
unsigned char *found;
@@ -2420,7 +2429,8 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
return buf_b; /* No overlap */
if (have_tsc) {
- found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b);
+ found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
+ consecutive);
if (found)
return found;
}
@@ -2435,28 +2445,16 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
}
/* Now len_b >= len_a */
- if (len_b > len_a) {
- /* The leftover buffer 'b' must start at a PSB */
- while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
- if (!intel_pt_step_psb(&buf_a, &len_a))
- return buf_b; /* No overlap */
- }
- }
-
while (1) {
/* Potential overlap so check the bytes */
found = memmem(buf_a, len_a, buf_b, len_a);
- if (found)
+ if (found) {
+ *consecutive = true;
return buf_b + len_a;
+ }
/* Try again at next PSB in buffer 'a' */
if (!intel_pt_step_psb(&buf_a, &len_a))
return buf_b; /* No overlap */
-
- /* The leftover buffer 'b' must start at a PSB */
- while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
- if (!intel_pt_step_psb(&buf_a, &len_a))
- return buf_b; /* No overlap */
- }
}
}
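The decoder rework determines not only where new trace data begins but also whether the old and new buffers are consecutive, either by matching TSC values (using the new rem_a/rem_b bookkeeping) or, without TSC, by byte comparison at PSB boundaries. A much-simplified sketch of the byte-comparison idea, ignoring PSB packets entirely (skip_overlap is an illustrative helper, not the decoder's API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Return a pointer to the first byte of buf_b that is not a repeat of the
 * tail of buf_a, and set *consecutive when such an overlap is found.  The
 * real decoder only starts comparisons at PSB packet boundaries and
 * prefers comparing TSC values when they are present. */
static unsigned char *skip_overlap(unsigned char *buf_a, size_t len_a,
                                   unsigned char *buf_b, size_t len_b,
                                   bool *consecutive)
{
        size_t n = len_a < len_b ? len_a : len_b;

        /* Try the longest possible overlap first, then shrink. */
        for (; n; n--) {
                if (!memcmp(buf_a + len_a - n, buf_b, n)) {
                        *consecutive = true;
                        return buf_b + n;  /* first n bytes repeat buf_a's tail */
                }
        }
        return buf_b;                      /* no overlap: all of buf_b is new */
}

int main(void)
{
        unsigned char a[] = "aaaabbbb", b[] = "bbbbcccc";
        bool consec = false;
        unsigned char *p = skip_overlap(a, 8, b, 8, &consec);

        printf("new data starts at offset %zu, consecutive=%d\n",
               (size_t)(p - b), consec);
        return 0;
}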
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index 89399985fa4d..9ae4df1dcedc 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -103,7 +103,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder);
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b,
- bool have_tsc);
+ bool have_tsc, bool *consecutive);
int intel_pt__strerror(int code, char *buf, size_t buflen);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index dc041d4368c8..b1161d725ce9 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -131,6 +131,7 @@ struct intel_pt_queue {
bool stop;
bool step_through_buffers;
bool use_buffer_pid_tid;
+ bool sync_switch;
pid_t pid, tid;
int cpu;
int switch_state;
@@ -194,14 +195,17 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
struct auxtrace_buffer *b)
{
+ bool consecutive = false;
void *start;
start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
- pt->have_tsc);
+ pt->have_tsc, &consecutive);
if (!start)
return -EINVAL;
b->use_size = b->data + b->size - start;
b->use_data = start;
+ if (b->use_size && consecutive)
+ b->consecutive = true;
return 0;
}
@@ -928,10 +932,12 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
if (pt->timeless_decoding || !pt->have_sched_switch)
ptq->use_buffer_pid_tid = true;
}
+
+ ptq->sync_switch = pt->sync_switch;
}
if (!ptq->on_heap &&
- (!pt->sync_switch ||
+ (!ptq->sync_switch ||
ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
const struct intel_pt_state *state;
int ret;
@@ -1333,7 +1339,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
if (pt->synth_opts.last_branch)
intel_pt_update_last_branch_rb(ptq);
- if (!pt->sync_switch)
+ if (!ptq->sync_switch)
return 0;
if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
@@ -1414,6 +1420,21 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
return switch_ip;
}
+static void intel_pt_enable_sync_switch(struct intel_pt *pt)
+{
+ unsigned int i;
+
+ pt->sync_switch = true;
+
+ for (i = 0; i < pt->queues.nr_queues; i++) {
+ struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+ struct intel_pt_queue *ptq = queue->priv;
+
+ if (ptq)
+ ptq->sync_switch = true;
+ }
+}
+
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
const struct intel_pt_state *state = ptq->state;
@@ -1430,7 +1451,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
if (pt->switch_ip) {
intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
pt->switch_ip, pt->ptss_ip);
- pt->sync_switch = true;
+ intel_pt_enable_sync_switch(pt);
}
}
}
@@ -1446,9 +1467,9 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
if (state->err) {
if (state->err == INTEL_PT_ERR_NODATA)
return 1;
- if (pt->sync_switch &&
+ if (ptq->sync_switch &&
state->from_ip >= pt->kernel_start) {
- pt->sync_switch = false;
+ ptq->sync_switch = false;
intel_pt_next_tid(pt, ptq);
}
if (pt->synth_opts.errors) {
@@ -1474,7 +1495,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
state->timestamp, state->est_timestamp);
ptq->timestamp = state->est_timestamp;
/* Use estimated TSC in unknown switch state */
- } else if (pt->sync_switch &&
+ } else if (ptq->sync_switch &&
ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
intel_pt_is_switch_ip(ptq, state->to_ip) &&
ptq->next_tid == -1) {
@@ -1621,7 +1642,7 @@ static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
return 1;
ptq = intel_pt_cpu_to_ptq(pt, cpu);
- if (!ptq)
+ if (!ptq || !ptq->sync_switch)
return 1;
switch (ptq->switch_state) {
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index fe84df1875aa..e70e935b1841 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -79,7 +79,7 @@ static union perf_event *dup_event(struct ordered_events *oe,
static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
- if (oe->copy_on_queue) {
+ if (event && oe->copy_on_queue) {
oe->cur_alloc_size -= event->header.size;
free(event);
}
@@ -150,6 +150,7 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve
list_move(&event->list, &oe->cache);
oe->nr_events--;
free_dup_event(oe, event->event);
+ event->event = NULL;
}
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
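The ordered-events.c change guards against freeing a copied event twice: free_dup_event() now tolerates a NULL pointer and the delete path clears event->event after freeing it. A minimal sketch of that free-and-clear pattern (the struct and helper names here are invented for illustration):

#include <stdlib.h>

struct ordered_event_stub {
        void *event;            /* heap copy owned by this entry */
};

/* Free the owned copy and clear the pointer so that releasing the same
 * entry again (once on delete, once on final cleanup) is a no-op rather
 * than a double free -- the pattern used in ordered-events.c. */
static void release_event(struct ordered_event_stub *oe)
{
        free(oe->event);        /* free(NULL) is defined to do nothing */
        oe->event = NULL;
}

int main(void)
{
        struct ordered_event_stub e = { .event = malloc(64) };

        release_event(&e);
        release_event(&e);      /* safe: pointer was cleared */
        return 0;
}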
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 6a6f44dd594b..c93daccec755 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2609,6 +2609,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
out:
free(nbase);
+
+ /* Final validation */
+ if (ret >= 0 && !is_c_func_name(buf)) {
+ pr_warning("Internal error: \"%s\" is an invalid event name.\n",
+ buf);
+ ret = -EINVAL;
+ }
+
return ret;
}
@@ -3060,7 +3068,7 @@ concat_probe_trace_events(struct probe_trace_event **tevs, int *ntevs,
struct probe_trace_event *new_tevs;
int ret = 0;
- if (ntevs == 0) {
+ if (*ntevs == 0) {
*tevs = *tevs2;
*ntevs = ntevs2;
*tevs2 = NULL;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5d61242a6e64..7e0573e55a35 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -139,8 +139,14 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
if (perf_session__open(session) < 0)
goto out_close;
- perf_session__set_id_hdr_size(session);
- perf_session__set_comm_exec(session);
+ /*
+ * set session attributes that are present in perf.data
+ * but not in pipe-mode.
+ */
+ if (!file->is_pipe) {
+ perf_session__set_id_hdr_size(session);
+ perf_session__set_comm_exec(session);
+ }
}
} else {
session->machines.host.env = &perf_env;
@@ -155,7 +161,11 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
pr_warning("Cannot read kernel map\n");
}
- if (tool && tool->ordering_requires_timestamps &&
+ /*
+ * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
+ * processed, so perf_evlist__sample_id_all is not meaningful here.
+ */
+ if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps &&
tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
tool->ordered_events = false;
@@ -1628,6 +1638,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session)
buf = malloc(cur_size);
if (!buf)
return -errno;
+ ordered_events__set_copy_on_queue(oe, true);
more:
event = buf;
err = readn(fd, event, sizeof(struct perf_event_header));
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 452e15a10dd2..031e64ce7156 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -846,6 +846,9 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
+ if (!left->branch_info || !right->branch_info)
+ return cmp_null(left->branch_info, right->branch_info);
+
return left->branch_info->flags.cycles -
right->branch_info->flags.cycles;
}
@@ -853,6 +856,8 @@ sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
+ if (!he->branch_info)
+ return scnprintf(bf, size, "%-.*s", width, "N/A");
if (he->branch_info->flags.cycles == 0)
return repsep_snprintf(bf, size, "%-*s", width, "-");
return repsep_snprintf(bf, size, "%-*hd", width,
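The sort.c hunks stop dereferencing he->branch_info unconditionally: samples without branch records now fall back to cmp_null() ordering and an "N/A" column instead of crashing. A NULL-aware comparator in the same shape, using invented stub types:

#include <stdio.h>

struct branch_stub { int cycles; };

/* Order entries that may lack branch info: two NULLs compare equal, a
 * NULL sorts deterministically against a non-NULL, and only when both
 * sides are present are the payloads compared -- the same shape as the
 * cmp_null() guard added to sort__cycles_cmp(). */
static int cmp_cycles(const struct branch_stub *l, const struct branch_stub *r)
{
        if (!l || !r)
                return (l != NULL) - (r != NULL);
        return l->cycles - r->cycles;
}

int main(void)
{
        struct branch_stub a = { 12 }, b = { 7 };

        printf("%d %d %d\n", cmp_cycles(&a, &b), cmp_cycles(NULL, &b),
               cmp_cycles(NULL, NULL));
        return 0;
}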
diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h
index e97d7016d771..ce7e6e5c1cea 100644
--- a/tools/perf/util/trigger.h
+++ b/tools/perf/util/trigger.h
@@ -11,7 +11,7 @@
* States and transits:
*
*
- * OFF--(on)--> READY --(hit)--> HIT
+ * OFF--> ON --> READY --(hit)--> HIT
* ^ |
* | (ready)
* | |
@@ -26,8 +26,9 @@ struct trigger {
volatile enum {
TRIGGER_ERROR = -2,
TRIGGER_OFF = -1,
- TRIGGER_READY = 0,
- TRIGGER_HIT = 1,
+ TRIGGER_ON = 0,
+ TRIGGER_READY = 1,
+ TRIGGER_HIT = 2,
} state;
const char *name;
};
@@ -49,7 +50,7 @@ static inline bool trigger_is_error(struct trigger *t)
static inline void trigger_on(struct trigger *t)
{
TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
- t->state = TRIGGER_READY;
+ t->state = TRIGGER_ON;
}
static inline void trigger_ready(struct trigger *t)
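The trigger.h hunk splits the old OFF-to-READY transition into OFF-to-ON-to-READY, so that a trigger that has merely been switched on is not yet armed. A toy model of the resulting state machine (simplified; the real header also tracks an ERROR state and warns on invalid transitions):

#include <assert.h>
#include <stdio.h>

/* Minimal model of the reworked trigger: switching it on no longer arms
 * it; a separate transition to READY is required before a hit counts. */
enum trig_state { TRIG_OFF, TRIG_ON, TRIG_READY, TRIG_HIT };

struct trig { enum trig_state state; };

static void trig_on(struct trig *t)
{
        assert(t->state == TRIG_OFF);
        t->state = TRIG_ON;
}

static void trig_ready(struct trig *t)
{
        if (t->state >= TRIG_ON)
                t->state = TRIG_READY;
}

static void trig_hit(struct trig *t)
{
        if (t->state == TRIG_READY)
                t->state = TRIG_HIT;
}

int main(void)
{
        struct trig t = { TRIG_OFF };

        trig_on(&t);
        trig_hit(&t);                                      /* ignored: not READY yet */
        printf("state after early hit: %d\n", t.state);   /* TRIG_ON */
        trig_ready(&t);
        trig_hit(&t);
        printf("state after real hit: %d\n", t.state);    /* TRIG_HIT */
        return 0;
}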
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 783a53fb7a4e..b46e1cf347e5 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -38,6 +38,14 @@ static int __report_module(struct addr_location *al, u64 ip,
return 0;
mod = dwfl_addrmodule(ui->dwfl, ip);
+ if (mod) {
+ Dwarf_Addr s;
+
+ dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+ if (s != al->map->start)
+ mod = 0;
+ }
+
if (!mod)
mod = dwfl_report_elf(ui->dwfl, dso->short_name,
dso->long_name, -1, al->map->start,
@@ -167,12 +175,16 @@ frame_callback(Dwfl_Frame *state, void *arg)
{
struct unwind_info *ui = arg;
Dwarf_Addr pc;
+ bool isactivation;
- if (!dwfl_frame_pc(state, &pc, NULL)) {
+ if (!dwfl_frame_pc(state, &pc, &isactivation)) {
pr_err("%s", dwfl_errmsg(-1));
return DWARF_CB_ABORT;
}
+ if (!isactivation)
+ --pc;
+
return entry(pc, ui) || !(--ui->max_stack) ?
DWARF_CB_ABORT : DWARF_CB_OK;
}
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 20c2e5743903..120383494ff2 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -646,6 +646,17 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
while (!ret && (unw_step(&c) > 0) && i < max_stack) {
unw_get_reg(&c, UNW_REG_IP, &ips[i]);
+
+ /*
+ * Decrement the IP for any non-activation frames.
+ * This is required to properly find the srcline
+ * for caller frames.
+ * See also the documentation for dwfl_frame_pc(),
+ * which this code tries to replicate.
+ */
+ if (unw_is_signal_frame(&c) <= 0)
+ --ips[i];
+
++i;
}
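Both unwinders now decrement the instruction pointer of non-activation frames before looking up the source line, because a return address points at the instruction after the call and can therefore resolve to the wrong srcline. A hedged sketch of the adjustment step (the helper and its arguments are invented for illustration):

#include <stdbool.h>
#include <stddef.h>

/* Adjust unwound instruction pointers before srcline lookups: a return
 * address points just past the call, possibly at the next source line, so
 * non-activation (caller) frames are nudged back by one byte to land
 * inside the call instruction.  Frame 0 and signal frames already point
 * at the sampled/interrupted instruction and are left alone. */
static void fixup_ips_for_srcline(unsigned long long *ips, size_t nr,
                                  const bool *is_activation)
{
        for (size_t i = 0; i < nr; i++)
                if (!is_activation[i])
                        ips[i]--;
}

int main(void)
{
        unsigned long long ips[] = { 0x4005d0, 0x400720, 0x400890 };
        bool act[] = { true, false, false };  /* frame 0 is the sampled IP */

        fixup_ips_for_srcline(ips, 3, act);
        return ips[1] == 0x40071f ? 0 : 1;
}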
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 85c56800f17a..dfb010bd29f2 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -207,7 +207,7 @@ int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
size -= ret;
off_in += ret;
- off_out -= ret;
+ off_out += ret;
}
munmap(ptr, off_in + size);
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 71620fa95953..d025eafc09c2 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -1908,6 +1908,7 @@ static __init int nfit_test_init(void)
put_device(&pdev->dev);
goto err_register;
}
+ get_device(&pdev->dev);
rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
@@ -1926,6 +1927,10 @@ static __init int nfit_test_init(void)
if (instances[i])
platform_device_unregister(&instances[i]->pdev);
nfit_test_teardown();
+ for (i = 0; i < NUM_NFITS; i++)
+ if (instances[i])
+ put_device(&instances[i]->pdev.dev);
+
return rc;
}
@@ -1933,10 +1938,13 @@ static __exit void nfit_test_exit(void)
{
int i;
- platform_driver_unregister(&nfit_test_driver);
for (i = 0; i < NUM_NFITS; i++)
platform_device_unregister(&instances[i]->pdev);
+ platform_driver_unregister(&nfit_test_driver);
nfit_test_teardown();
+
+ for (i = 0; i < NUM_NFITS; i++)
+ put_device(&instances[i]->pdev.dev);
class_destroy(nfit_test_dimm);
}
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index d8ac9ba67688..99d7f13a5637 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -28,7 +28,12 @@ test_finish()
if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
fi
- echo -n "$OLD_PATH" >/sys/module/firmware_class/parameters/path
+ if [ "$OLD_FWPATH" = "" ]; then
+ # A zero-length write won't work; write a null byte
+ printf '\000' >/sys/module/firmware_class/parameters/path
+ else
+ echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
+ fi
rm -f "$FW"
rmdir "$FWPATH"
}
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
index d9c49f41515e..e79ccd6aada1 100644
--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
@@ -42,12 +42,12 @@ int test_body(void)
printf("Check DSCR TM context switch: ");
fflush(stdout);
for (;;) {
- rv = 1;
asm __volatile__ (
/* set a known value into the DSCR */
"ld 3, %[dscr1];"
"mtspr %[sprn_dscr], 3;"
+ "li %[rv], 1;"
/* start and suspend a transaction */
"tbegin.;"
"beq 1f;"
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 3f81a1095206..50a6371b2b2e 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -51,7 +51,7 @@ then
mkdir $builddir
fi
else
- echo Bad build directory: \"$builddir\"
+ echo Bad build directory: \"$buildloc\"
exit 2
fi
fi
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index cbb0564c0ec4..f68998149351 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1318,7 +1318,7 @@ void change_syscall(struct __test_metadata *_metadata,
iov.iov_len = sizeof(regs);
ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
#endif
- EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, ret) {}
#if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
defined(__s390__) || defined(__hppa__)
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 6eb50152baf0..e5b459a09cff 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -17,7 +17,7 @@ TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
-CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
+CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
UNAME_M := $(shell uname -m)
CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index d075ea0e5ca1..ade443a88421 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -95,6 +95,31 @@ asm (
"int3\n\t"
"vmcode_int80:\n\t"
"int $0x80\n\t"
+ "vmcode_popf_hlt:\n\t"
+ "push %ax\n\t"
+ "popf\n\t"
+ "hlt\n\t"
+ "vmcode_umip:\n\t"
+ /* addressing via displacements */
+ "smsw (2052)\n\t"
+ "sidt (2054)\n\t"
+ "sgdt (2060)\n\t"
+ /* addressing via registers */
+ "mov $2066, %bx\n\t"
+ "smsw (%bx)\n\t"
+ "mov $2068, %bx\n\t"
+ "sidt (%bx)\n\t"
+ "mov $2074, %bx\n\t"
+ "sgdt (%bx)\n\t"
+ /* register operands, only for smsw */
+ "smsw %ax\n\t"
+ "mov %ax, (2080)\n\t"
+ "int3\n\t"
+ "vmcode_umip_str:\n\t"
+ "str %eax\n\t"
+ "vmcode_umip_sldt:\n\t"
+ "sldt %eax\n\t"
+ "int3\n\t"
".size vmcode, . - vmcode\n\t"
"end_vmcode:\n\t"
".code32\n\t"
@@ -103,7 +128,8 @@ asm (
extern unsigned char vmcode[], end_vmcode[];
extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[],
- vmcode_sti[], vmcode_int3[], vmcode_int80[];
+ vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_popf_hlt[],
+ vmcode_umip[], vmcode_umip_str[], vmcode_umip_sldt[];
/* Returns false if the test was skipped. */
static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
@@ -153,13 +179,75 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
(VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) {
printf("[OK]\tReturned correctly\n");
} else {
- printf("[FAIL]\tIncorrect return reason\n");
+ printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n", eip, v86->regs.eip);
nerrs++;
}
return true;
}
+void do_umip_tests(struct vm86plus_struct *vm86, unsigned char *test_mem)
+{
+ struct table_desc {
+ unsigned short limit;
+ unsigned long base;
+ } __attribute__((packed));
+
+ /* Initialize variables with arbitrary values */
+ struct table_desc gdt1 = { .base = 0x3c3c3c3c, .limit = 0x9999 };
+ struct table_desc gdt2 = { .base = 0x1a1a1a1a, .limit = 0xaeae };
+ struct table_desc idt1 = { .base = 0x7b7b7b7b, .limit = 0xf1f1 };
+ struct table_desc idt2 = { .base = 0x89898989, .limit = 0x1313 };
+ unsigned short msw1 = 0x1414, msw2 = 0x2525, msw3 = 3737;
+
+ /* UMIP -- exit with INT3 unless kernel emulation did not trap #GP */
+ do_test(vm86, vmcode_umip - vmcode, VM86_TRAP, 3, "UMIP tests");
+
+ /* Results from displacement-only addressing */
+ msw1 = *(unsigned short *)(test_mem + 2052);
+ memcpy(&idt1, test_mem + 2054, sizeof(idt1));
+ memcpy(&gdt1, test_mem + 2060, sizeof(gdt1));
+
+ /* Results from register-indirect addressing */
+ msw2 = *(unsigned short *)(test_mem + 2066);
+ memcpy(&idt2, test_mem + 2068, sizeof(idt2));
+ memcpy(&gdt2, test_mem + 2074, sizeof(gdt2));
+
+ /* Results when using register operands */
+ msw3 = *(unsigned short *)(test_mem + 2080);
+
+ printf("[INFO]\tResult from SMSW:[0x%04x]\n", msw1);
+ printf("[INFO]\tResult from SIDT: limit[0x%04x]base[0x%08lx]\n",
+ idt1.limit, idt1.base);
+ printf("[INFO]\tResult from SGDT: limit[0x%04x]base[0x%08lx]\n",
+ gdt1.limit, gdt1.base);
+
+ if (msw1 != msw2 || msw1 != msw3)
+ printf("[FAIL]\tAll the results of SMSW should be the same.\n");
+ else
+ printf("[PASS]\tAll the results from SMSW are identical.\n");
+
+ if (memcmp(&gdt1, &gdt2, sizeof(gdt1)))
+ printf("[FAIL]\tAll the results of SGDT should be the same.\n");
+ else
+ printf("[PASS]\tAll the results from SGDT are identical.\n");
+
+ if (memcmp(&idt1, &idt2, sizeof(idt1)))
+ printf("[FAIL]\tAll the results of SIDT should be the same.\n");
+ else
+ printf("[PASS]\tAll the results from SIDT are identical.\n");
+
+ sethandler(SIGILL, sighandler, 0);
+ do_test(vm86, vmcode_umip_str - vmcode, VM86_SIGNAL, 0,
+ "STR instruction");
+ clearhandler(SIGILL);
+
+ sethandler(SIGILL, sighandler, 0);
+ do_test(vm86, vmcode_umip_sldt - vmcode, VM86_SIGNAL, 0,
+ "SLDT instruction");
+ clearhandler(SIGILL);
+}
+
int main(void)
{
struct vm86plus_struct v86;
@@ -180,6 +268,9 @@ int main(void)
v86.regs.ds = load_addr / 16;
v86.regs.es = load_addr / 16;
+ /* Use the end of the page as our stack. */
+ v86.regs.esp = 4096;
+
assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */
/* #BR -- should deliver SIG??? */
@@ -211,6 +302,23 @@ int main(void)
v86.regs.eflags &= ~X86_EFLAGS_IF;
do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set");
+ /* POPF with VIP set but IF clear: should not trap */
+ v86.regs.eflags = X86_EFLAGS_VIP;
+ v86.regs.eax = 0;
+ do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP set and IF clear");
+
+ /* POPF with VIP set and IF set: should trap */
+ v86.regs.eflags = X86_EFLAGS_VIP;
+ v86.regs.eax = X86_EFLAGS_IF;
+ do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0, "POPF with VIP and IF set");
+
+ /* POPF with VIP clear and IF set: should not trap */
+ v86.regs.eflags = 0;
+ v86.regs.eax = X86_EFLAGS_IF;
+ do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP clear and IF set");
+
+ v86.regs.eflags = 0;
+
/* INT3 -- should cause #BP */
do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3");
@@ -218,6 +326,9 @@ int main(void)
v86.regs.eax = (unsigned int)-1;
do_test(&v86, vmcode_int80 - vmcode, VM86_INTx, 0x80, "int80");
+ /* UMIP -- should exit with INTx 0x80 unless UMIP was not disabled */
+ do_umip_tests(&v86, addr);
+
/* Execute a null pointer */
v86.regs.cs = 0;
v86.regs.ss = 0;
@@ -231,7 +342,7 @@ int main(void)
clearhandler(SIGSEGV);
/* Make sure nothing explodes if we fork. */
- if (fork() > 0)
+ if (fork() == 0)
return 0;
return (nerrs == 0 ? 0 : 1);
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
index 79e1d13d1cda..58384189370c 100644
--- a/tools/testing/selftests/x86/mpx-mini-test.c
+++ b/tools/testing/selftests/x86/mpx-mini-test.c
@@ -419,8 +419,7 @@ void handler(int signum, siginfo_t *si, void *vucontext)
br_count++;
dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count);
-#define __SI_FAULT (3 << 16)
-#define SEGV_BNDERR (__SI_FAULT|3) /* failed address bound checks */
+#define SEGV_BNDERR 3 /* failed address bound checks */
dprintf2("Saw a #BR! status 0x%jx at %016lx br_reason: %jx\n",
status, ip, br_reason);
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 2842a5fa22b3..85a78eba0a93 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -188,17 +188,29 @@ void lots_o_noops_around_write(int *write_to_me)
#define u64 uint64_t
#ifdef __i386__
-#define SYS_mprotect_key 380
-#define SYS_pkey_alloc 381
-#define SYS_pkey_free 382
+
+#ifndef SYS_mprotect_key
+# define SYS_mprotect_key 380
+#endif
+#ifndef SYS_pkey_alloc
+# define SYS_pkey_alloc 381
+# define SYS_pkey_free 382
+#endif
#define REG_IP_IDX REG_EIP
-#define si_pkey_offset 0x18
+#define si_pkey_offset 0x14
+
#else
-#define SYS_mprotect_key 329
-#define SYS_pkey_alloc 330
-#define SYS_pkey_free 331
+
+#ifndef SYS_mprotect_key
+# define SYS_mprotect_key 329
+#endif
+#ifndef SYS_pkey_alloc
+# define SYS_pkey_alloc 330
+# define SYS_pkey_free 331
+#endif
#define REG_IP_IDX REG_RIP
#define si_pkey_offset 0x20
+
#endif
void dump_mem(void *dumpme, int len_bytes)
@@ -212,19 +224,18 @@ void dump_mem(void *dumpme, int len_bytes)
}
}
-#define __SI_FAULT (3 << 16)
-#define SEGV_BNDERR (__SI_FAULT|3) /* failed address bound checks */
-#define SEGV_PKUERR (__SI_FAULT|4)
+#define SEGV_BNDERR 3 /* failed address bound checks */
+#define SEGV_PKUERR 4
static char *si_code_str(int si_code)
{
- if (si_code & SEGV_MAPERR)
+ if (si_code == SEGV_MAPERR)
return "SEGV_MAPERR";
- if (si_code & SEGV_ACCERR)
+ if (si_code == SEGV_ACCERR)
return "SEGV_ACCERR";
- if (si_code & SEGV_BNDERR)
+ if (si_code == SEGV_BNDERR)
return "SEGV_BNDERR";
- if (si_code & SEGV_PKUERR)
+ if (si_code == SEGV_PKUERR)
return "SEGV_PKUERR";
return "UNKNOWN";
}
@@ -238,7 +249,7 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
unsigned long ip;
char *fpregs;
u32 *pkru_ptr;
- u64 si_pkey;
+ u64 siginfo_pkey;
u32 *si_pkey_ptr;
int pkru_offset;
fpregset_t fpregset;
@@ -280,9 +291,9 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
dump_mem(si_pkey_ptr - 8, 24);
- si_pkey = *si_pkey_ptr;
- pkey_assert(si_pkey < NR_PKEYS);
- last_si_pkey = si_pkey;
+ siginfo_pkey = *si_pkey_ptr;
+ pkey_assert(siginfo_pkey < NR_PKEYS);
+ last_si_pkey = siginfo_pkey;
if ((si->si_code == SEGV_MAPERR) ||
(si->si_code == SEGV_ACCERR) ||
@@ -294,7 +305,7 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
/* need __rdpkru() version so we do not do shadow_pkru checking */
dprintf1("signal pkru from pkru: %08x\n", __rdpkru());
- dprintf1("si_pkey from siginfo: %jx\n", si_pkey);
+ dprintf1("pkey from siginfo: %jx\n", siginfo_pkey);
*(u64 *)pkru_ptr = 0x00000000;
dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n");
pkru_faults++;
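The protection_keys.c hunk drops the stale __SI_FAULT-based definitions and, more importantly, compares si_code with '==' instead of '&': these codes are consecutive small integers, not bit masks, so the bitwise test made SEGV_BNDERR (3) also "match" SEGV_MAPERR and SEGV_ACCERR. A small demonstration of the difference:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>

/* si_code values for SIGSEGV are small enumerations (SEGV_MAPERR == 1,
 * SEGV_ACCERR == 2, ...), not bit flags, so they must be compared with
 * '==': testing with '&' makes e.g. code 3 appear to match both 1 and 2. */
static const char *segv_code_str(int si_code)
{
        switch (si_code) {
        case SEGV_MAPERR: return "SEGV_MAPERR";
        case SEGV_ACCERR: return "SEGV_ACCERR";
        default:          return "UNKNOWN";
        }
}

int main(void)
{
        printf("%s\n", segv_code_str(SEGV_ACCERR));
        /* The old bitwise test wrongly reports a match for code 3: */
        printf("code 3 '&' SEGV_MAPERR -> %d\n", (3 & SEGV_MAPERR) != 0);
        return 0;
}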
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
index eaea92439708..1e3da137a8bb 100644
--- a/tools/testing/selftests/x86/ptrace_syscall.c
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -182,8 +182,10 @@ static void test_ptrace_syscall_restart(void)
if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
err(1, "PTRACE_TRACEME");
+ pid_t pid = getpid(), tid = syscall(SYS_gettid);
+
printf("\tChild will make one syscall\n");
- raise(SIGSTOP);
+ syscall(SYS_tgkill, pid, tid, SIGSTOP);
syscall(SYS_gettid, 10, 11, 12, 13, 14, 15);
_exit(0);
@@ -300,9 +302,11 @@ static void test_restart_under_ptrace(void)
if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
err(1, "PTRACE_TRACEME");
+ pid_t pid = getpid(), tid = syscall(SYS_gettid);
+
printf("\tChild will take a nap until signaled\n");
setsigign(SIGUSR1, SA_RESTART);
- raise(SIGSTOP);
+ syscall(SYS_tgkill, pid, tid, SIGSTOP);
syscall(SYS_pause, 0, 0, 0, 0, 0, 0);
_exit(0);
diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c
index a0972dea9e6c..193556507957 100644
--- a/tools/usb/usbip/src/usbipd.c
+++ b/tools/usb/usbip/src/usbipd.c
@@ -463,7 +463,7 @@ static void set_signal(void)
sigaction(SIGTERM, &act, NULL);
sigaction(SIGINT, &act, NULL);
act.sa_handler = SIG_IGN;
- sigaction(SIGCLD, &act, NULL);
+ sigaction(SIGCHLD, &act, NULL);
}
static const char *pid_file;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1b20768e781d..eaae7252f60c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -976,8 +976,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* Check for overlaps */
r = -EEXIST;
kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
- if ((slot->id >= KVM_USER_MEM_SLOTS) ||
- (slot->id == id))
+ if (slot->id == id)
continue;
if (!((base_gfn + npages <= slot->base_gfn) ||
(base_gfn >= slot->base_gfn + slot->npages)))