-rw-r--r--Documentation/devicetree/bindings/dma/rcar-audmapp.txt6
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt4
-rw-r--r--Documentation/devicetree/bindings/usb/mxs-phy.txt1
-rw-r--r--Documentation/devicetree/bindings/video/analog-tv-connector.txt4
-rw-r--r--Documentation/kernel-parameters.txt1
-rw-r--r--MAINTAINERS21
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts2
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h25
-rw-r--r--arch/arm/include/asm/xen/page.h9
-rw-r--r--arch/arm/xen/Makefile2
-rw-r--r--arch/arm/xen/enlighten.c6
-rw-r--r--arch/arm/xen/mm32.c202
-rw-r--r--arch/arm/xen/p2m.c66
-rw-r--r--arch/arm64/kernel/irq.c12
-rw-r--r--arch/arm64/kernel/process.c18
-rw-r--r--arch/arm64/kernel/sys_compat.c6
-rw-r--r--arch/microblaze/Kconfig6
-rw-r--r--arch/microblaze/include/asm/entry.h1
-rw-r--r--arch/microblaze/include/asm/uaccess.h4
-rw-r--r--arch/microblaze/include/asm/unistd.h2
-rw-r--r--arch/parisc/Kconfig16
-rw-r--r--arch/parisc/hpux/sys_hpux.c2
-rw-r--r--arch/parisc/include/asm/seccomp.h16
-rw-r--r--arch/parisc/include/asm/thread_info.h5
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h5
-rw-r--r--arch/parisc/kernel/ptrace.c6
-rw-r--r--arch/parisc/kernel/syscall.S233
-rw-r--r--arch/parisc/kernel/syscall_table.S3
-rw-r--r--arch/powerpc/configs/cell_defconfig1
-rw-r--r--arch/powerpc/configs/celleb_defconfig1
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig1
-rw-r--r--arch/powerpc/configs/g5_defconfig1
-rw-r--r--arch/powerpc/configs/maple_defconfig1
-rw-r--r--arch/powerpc/configs/pasemi_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig1
-rw-r--r--arch/powerpc/configs/ps3_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_le_defconfig1
-rw-r--r--arch/powerpc/include/asm/ptrace.h7
-rw-r--r--arch/powerpc/include/asm/systbl.h3
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h3
-rw-r--r--arch/powerpc/perf/callchain.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-hmi.c3
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c36
-rw-r--r--arch/s390/include/asm/ipl.h8
-rw-r--r--arch/s390/kernel/ipl.c128
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S10
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S10
-rw-r--r--arch/sh/mm/gup.c2
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/bitops.h2
-rw-r--r--arch/x86/include/asm/pgtable_64.h1
-rw-r--r--arch/x86/xen/mmu.c27
-rw-r--r--block/blk-merge.c17
-rw-r--r--block/blk-mq.c91
-rw-r--r--block/blk-sysfs.c6
-rw-r--r--block/genhd.c24
-rw-r--r--block/partition-generic.c2
-rw-r--r--drivers/acpi/acpi_cmos_rtc.c2
-rw-r--r--drivers/acpi/acpi_lpss.c10
-rw-r--r--drivers/acpi/battery.c14
-rw-r--r--drivers/ata/ahci.c22
-rw-r--r--drivers/ata/ahci_tegra.c14
-rw-r--r--drivers/ata/ahci_xgene.c61
-rw-r--r--drivers/ata/ata_piix.c8
-rw-r--r--drivers/ata/pata_jmicron.c12
-rw-r--r--drivers/bcma/host_pci.c1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c1
-rw-r--r--drivers/block/null_blk.c29
-rw-r--r--drivers/block/rbd.c6
-rw-r--r--drivers/cpufreq/cpufreq_opp.c2
-rw-r--r--drivers/dma/dma-jz4740.c3
-rw-r--r--drivers/gpu/drm/ast/ast_main.c3
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c9
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c409
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h12
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c66
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c46
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c15
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c7
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c30
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c8
-rw-r--r--drivers/input/keyboard/cap1106.c4
-rw-r--r--drivers/input/keyboard/matrix_keypad.c9
-rw-r--r--drivers/input/mouse/alps.c4
-rw-r--r--drivers/input/mouse/elantech.c11
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/mouse/synaptics.c68
-rw-r--r--drivers/input/mouse/synaptics.h11
-rw-r--r--drivers/input/mouse/synaptics_usb.c6
-rw-r--r--drivers/input/mouse/trackpoint.c3
-rw-r--r--drivers/input/serio/serport.c45
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c25
-rw-r--r--drivers/input/touchscreen/wm9712.c2
-rw-r--r--drivers/input/touchscreen/wm9713.c2
-rw-r--r--drivers/iommu/arm-smmu.c127
-rw-r--r--drivers/iommu/dmar.c3
-rw-r--r--drivers/iommu/fsl_pamu_domain.c10
-rw-r--r--drivers/iommu/iommu.c8
-rw-r--r--drivers/irqchip/exynos-combiner.c1
-rw-r--r--drivers/irqchip/irq-crossbar.c4
-rw-r--r--drivers/irqchip/irq-gic-v3.c38
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/md/dm-cache-target.c4
-rw-r--r--drivers/misc/lattice-ecp3-config.c5
-rw-r--r--drivers/net/ethernet/3com/3c59x.c8
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c86
-rw-r--r--drivers/net/ethernet/aeroflex/greth.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c209
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c7
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h4
-rw-r--r--drivers/net/ethernet/apm/xgene/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c74
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c57
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c7
-rw-r--r--drivers/net/ethernet/calxeda/Kconfig1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c79
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c19
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c38
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c34
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c5
-rw-r--r--drivers/net/ethernet/renesas/Kconfig1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c142
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h2
-rw-r--r--drivers/net/fddi/skfp/h/skfbi.h5
-rw-r--r--drivers/net/phy/phy.c18
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c15
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vxlan.c8
-rw-r--r--drivers/net/wireless/at76c50x-usb.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/spectral.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c2
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/ntb/ntb_transport.c17
-rw-r--r--drivers/parisc/dino.c2
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/phy-exynos5-usbdrd.c1
-rw-r--r--drivers/phy/phy-twl4030-usb.c121
-rw-r--r--drivers/pinctrl/pinctrl-baytrail.c1
-rw-r--r--drivers/s390/block/dasd_devmap.c2
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c16
-rw-r--r--drivers/s390/net/qeth_l2_sys.c7
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
-rw-r--r--drivers/staging/android/sync.c1
-rw-r--r--drivers/staging/imx-drm/imx-ldb.c3
-rw-r--r--drivers/staging/imx-drm/ipuv3-plane.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c2
-rw-r--r--drivers/tty/serial/8250/8250_dw.c1
-rw-r--r--drivers/tty/serial/atmel_serial.c43
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c7
-rw-r--r--drivers/usb/core/hub.c4
-rw-r--r--drivers/usb/dwc2/gadget.c52
-rw-r--r--drivers/usb/dwc3/core.c13
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c2
-rw-r--r--drivers/usb/dwc3/gadget.c11
-rw-r--r--drivers/usb/gadget/function/f_fs.c66
-rw-r--r--drivers/usb/gadget/function/u_fs.h2
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.h2
-rw-r--r--drivers/usb/gadget/udc/net2280.c2
-rw-r--r--drivers/usb/host/xhci-hub.c8
-rw-r--r--drivers/usb/host/xhci-mem.c3
-rw-r--r--drivers/usb/host/xhci.c12
-rw-r--r--drivers/usb/musb/musb_cppi41.c17
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c8
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c4
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c72
-rw-r--r--drivers/usb/renesas_usbhs/mod.c5
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c13
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h4
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h12
-rw-r--r--drivers/usb/serial/sierra.c9
-rw-r--r--drivers/usb/serial/zte_ev.c8
-rw-r--r--drivers/usb/storage/uas-detect.h27
-rw-r--r--drivers/usb/storage/unusual_devs.h6
-rw-r--r--drivers/uwb/lc-dev.c13
-rw-r--r--drivers/video/fbdev/amba-clcd.c4
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/gntalloc.c16
-rw-r--r--drivers/xen/manage.c7
-rw-r--r--fs/btrfs/file.c2
-rw-r--r--fs/btrfs/inode.c191
-rw-r--r--fs/btrfs/ioctl.c32
-rw-r--r--fs/btrfs/tree-log.c77
-rw-r--r--fs/btrfs/tree-log.h2
-rw-r--r--fs/cifs/Kconfig35
-rw-r--r--fs/cifs/cifsglob.h5
-rw-r--r--fs/cifs/connect.c21
-rw-r--r--fs/cifs/dir.c8
-rw-r--r--fs/cifs/file.c8
-rw-r--r--fs/cifs/inode.c5
-rw-r--r--fs/cifs/readdir.c4
-rw-r--r--fs/cifs/sess.c7
-rw-r--r--fs/cifs/smb2file.c2
-rw-r--r--fs/cifs/smb2inode.c2
-rw-r--r--fs/cifs/smb2ops.c4
-rw-r--r--fs/cifs/smb2pdu.c7
-rw-r--r--fs/dcache.c11
-rw-r--r--fs/eventpoll.c3
-rw-r--r--fs/ext4/namei.c2
-rw-r--r--fs/ext4/resize.c2
-rw-r--r--fs/lockd/svc.c4
-rw-r--r--fs/namei.c75
-rw-r--r--fs/nfs/client.c12
-rw-r--r--fs/nfs/filelayout/filelayout.c5
-rw-r--r--fs/nfs/nfs4_fs.h13
-rw-r--r--fs/nfs/nfs4state.c24
-rw-r--r--fs/nfsd/nfs4xdr.c14
-rw-r--r--fs/notify/fdinfo.c4
-rw-r--r--fs/udf/ialloc.c28
-rw-r--r--fs/udf/inode.c161
-rw-r--r--fs/udf/namei.c157
-rw-r--r--fs/udf/super.c69
-rw-r--r--fs/udf/udfdecl.h3
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/hash.h4
-rw-r--r--include/linux/jiffies.h12
-rw-r--r--include/linux/mlx4/device.h3
-rw-r--r--include/linux/netdevice.h4
-rw-r--r--include/linux/netfilter.h5
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/netns/ieee802154_6lowpan.h1
-rw-r--r--include/net/regulatory.h2
-rw-r--r--include/net/sctp/sctp.h13
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/wimax.h2
-rw-r--r--include/trace/events/irq.h4
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/xen/interface/features.h3
-rw-r--r--kernel/cgroup.c47
-rw-r--r--kernel/futex.c1
-rw-r--r--kernel/kcmp.c7
-rw-r--r--kernel/printk/printk.c6
-rw-r--r--kernel/time/alarmtimer.c34
-rw-r--r--kernel/time/time.c56
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/assoc_array.c4
-rw-r--r--lib/hweight.c4
-rw-r--r--lib/string.c4
-rw-r--r--mm/memblock.c4
-rw-r--r--mm/mmap.c16
-rw-r--r--mm/nobootmem.c2
-rw-r--r--mm/percpu-vm.c22
-rw-r--r--mm/percpu.c2
-rw-r--r--net/bluetooth/hci_conn.c8
-rw-r--r--net/bluetooth/hci_core.c14
-rw-r--r--net/bluetooth/hci_event.c17
-rw-r--r--net/ceph/auth_x.c256
-rw-r--r--net/ceph/mon_client.c8
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c77
-rw-r--r--net/core/gen_estimator.c2
-rw-r--r--net/core/gen_stats.c2
-rw-r--r--net/core/skbuff.c4
-rw-r--r--net/core/sock.c34
-rw-r--r--net/ieee802154/6lowpan_rtnl.c4
-rw-r--r--net/ieee802154/reassembly.c15
-rw-r--r--net/ipv4/netfilter/Kconfig102
-rw-r--r--net/ipv4/netfilter/Makefile2
-rw-r--r--net/ipv6/addrconf.c29
-rw-r--r--net/ipv6/anycast.c10
-rw-r--r--net/ipv6/mcast.c14
-rw-r--r--net/ipv6/netfilter/Kconfig28
-rw-r--r--net/ipv6/netfilter/Makefile2
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/chan.c2
-rw-r--r--net/mac80211/debugfs_sta.c2
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/mesh_plink.c7
-rw-r--r--net/mac80211/mlme.c3
-rw-r--r--net/mac80211/sta_info.c7
-rw-r--r--net/mac802154/wpan.c6
-rw-r--r--net/netfilter/Kconfig14
-rw-r--r--net/netfilter/Makefile2
-rw-r--r--net/netfilter/core.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c20
-rw-r--r--net/netfilter/xt_cgroup.c2
-rw-r--r--net/openvswitch/datapath.c11
-rw-r--r--net/rfkill/rfkill-gpio.c1
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/socket.c5
-rwxr-xr-xscripts/checkpatch.pl5
-rw-r--r--tools/usb/usbip/libsrc/usbip_common.h2
331 files changed, 3961 insertions, 2087 deletions
diff --git a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
index 9f1d750d76de..61bca509d7b9 100644
--- a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
+++ b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
@@ -16,9 +16,9 @@ Example:
* DMA client
Required properties:
-- dmas: a list of <[DMA multiplexer phandle] [SRS/DRS value]> pairs,
- where SRS/DRS values are fixed handles, specified in the SoC
- manual as the value that would be written into the PDMACHCR.
+- dmas: a list of <[DMA multiplexer phandle] [SRS << 8 | DRS]> pairs,
+  where SRS and DRS are the values specified in the SoC manual.
+  The combined value is written into the upper 16 bits of PDMACHCR.
- dma-names: a list of DMA channel names, one per "dmas" entry
Example:
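
The encoding can be illustrated with a short C sketch. This is only an
illustration of the binding text above, not the audmapp driver's actual code;
the helper name and the assumption that the rest of PDMACHCR is composed
elsewhere are mine.

    #include <linux/types.h>

    /* The dmas cell carries (SRS << 8) | DRS; per the text above that value
     * occupies the upper 16 bits of PDMACHCR. */
    static u32 audmapp_chcr_from_spec(u32 spec)
    {
            return spec << 16;      /* SRS lands in bits 31:24, DRS in 23:16 */
    }
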
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 9b03c57563a4..e45ac3f926b1 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -39,6 +39,10 @@ Optional properties:
further clocks may be specified in derived bindings.
- clock-names: One name for each entry in the clocks property, the
first one should be "stmmaceth".
+- clk_ptp_ref: the PTP reference clock. If the PTP is available, this
+  clock is used for programming the Timestamp Addend Register. If it is
+  not passed, the system clock will be used instead, which is fine on
+  some platforms.
Examples:
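
A minimal sketch of how a driver can honour this property, assuming the clock
is looked up by the name above and that priv->stmmac_clk is the system clock
handle (the field names are assumptions, not necessarily the exact stmmac
code):

    struct clk *ptp_clk;

    ptp_clk = devm_clk_get(&pdev->dev, "clk_ptp_ref");
    if (IS_ERR(ptp_clk))
            ptp_clk = priv->stmmac_clk;     /* fall back to the system clock */
    /* the rate of ptp_clk then feeds the Timestamp Addend computation */
    priv->clk_ptp_rate = clk_get_rate(ptp_clk);
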
diff --git a/Documentation/devicetree/bindings/usb/mxs-phy.txt b/Documentation/devicetree/bindings/usb/mxs-phy.txt
index fe3eed89e4c3..379b84a567cc 100644
--- a/Documentation/devicetree/bindings/usb/mxs-phy.txt
+++ b/Documentation/devicetree/bindings/usb/mxs-phy.txt
@@ -6,6 +6,7 @@ Required properties:
* "fsl,imx6q-usbphy" for imx6dq and imx6dl
* "fsl,imx6sl-usbphy" for imx6sl
* "fsl,vf610-usbphy" for Vybrid vf610
+ * "fsl,imx6sx-usbphy" for imx6sx
"fsl,imx23-usbphy" is still a fallback for other strings
- reg: Should contain registers location and length
- interrupts: Should contain phy interrupt
diff --git a/Documentation/devicetree/bindings/video/analog-tv-connector.txt b/Documentation/devicetree/bindings/video/analog-tv-connector.txt
index 0218fcdc1299..0c0970c210ab 100644
--- a/Documentation/devicetree/bindings/video/analog-tv-connector.txt
+++ b/Documentation/devicetree/bindings/video/analog-tv-connector.txt
@@ -2,7 +2,7 @@ Analog TV Connector
===================
Required properties:
-- compatible: "composite-connector" or "svideo-connector"
+- compatible: "composite-video-connector" or "svideo-connector"
Optional properties:
- label: a symbolic name for the connector
@@ -14,7 +14,7 @@ Example
-------
tv: connector {
- compatible = "composite-connector";
+ compatible = "composite-video-connector";
label = "tv";
port {
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 5ae8608ca9f5..10d51c2f10d7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3541,6 +3541,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
bogus residue values);
s = SINGLE_LUN (the device has only one
Logical Unit);
+ u = IGNORE_UAS (don't bind to the uas driver);
w = NO_WP_DETECT (don't test whether the
medium is write-protected).
Example: quirks=0419:aaf5:rl,0421:0433:rc
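
For example, to apply the new flag to a single device, the quirk string
follows the vid:pid:flags form shown above; the 1234:5678 IDs here are
placeholders, not a real device:

    usb-storage.quirks=1234:5678:u
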
diff --git a/MAINTAINERS b/MAINTAINERS
index 96568a8f961b..3ca017a3ddea 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6425,7 +6425,8 @@ F: Documentation/scsi/NinjaSCSI.txt
F: drivers/scsi/nsp32*
NTB DRIVER
-M: Jon Mason <jon.mason@intel.com>
+M: Jon Mason <jdmason@kudzu.us>
+M: Dave Jiang <dave.jiang@intel.com>
S: Supported
W: https://github.com/jonmason/ntb/wiki
T: git git://github.com/jonmason/ntb.git
@@ -7054,7 +7055,7 @@ S: Maintained
F: drivers/pinctrl/sh-pfc/
PIN CONTROLLER - SAMSUNG
-M: Tomasz Figa <t.figa@samsung.com>
+M: Tomasz Figa <tomasz.figa@gmail.com>
M: Thomas Abraham <thomas.abraham@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -7900,7 +7901,8 @@ S: Supported
F: drivers/media/i2c/s5k5baf.c
SAMSUNG SOC CLOCK DRIVERS
-M: Tomasz Figa <t.figa@samsung.com>
+M: Sylwester Nawrocki <s.nawrocki@samsung.com>
+M: Tomasz Figa <tomasz.figa@gmail.com>
S: Supported
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
F: drivers/clk/samsung/
@@ -7913,6 +7915,19 @@ S: Supported
L: netdev@vger.kernel.org
F: drivers/net/ethernet/samsung/sxgbe/
+SAMSUNG USB2 PHY DRIVER
+M: Kamil Debski <k.debski@samsung.com>
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/phy/samsung-phy.txt
+F: Documentation/phy/samsung-usb2.txt
+F: drivers/phy/phy-exynos4210-usb2.c
+F: drivers/phy/phy-exynos4x12-usb2.c
+F: drivers/phy/phy-exynos5250-usb2.c
+F: drivers/phy/phy-s5pv210-usb2.c
+F: drivers/phy/phy-samsung-usb2.c
+F: drivers/phy/phy-samsung-usb2.h
+
SERIAL DRIVERS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: linux-serial@vger.kernel.org
diff --git a/Makefile b/Makefile
index 1a60bdd05c9a..036b7331d7b4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 17
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 1fe45d1f75ec..4361777a08d8 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -93,7 +93,7 @@
};
tv: connector {
- compatible = "composite-connector";
+ compatible = "composite-video-connector";
label = "tv";
port {
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 1109017499e5..e8275ea88e88 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
}
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- if (__generic_dma_ops(hwdev)->unmap_page)
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
+ struct dma_attrs *attrs);
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (__generic_dma_ops(hwdev)->sync_single_for_device)
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index ded062f9b358..135c24a5ba26 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -33,7 +33,6 @@ typedef struct xpaddr {
#define INVALID_P2M_ENTRY (~0UL)
unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
extern struct rb_root phys_to_mach;
static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
- unsigned long pfn;
-
- if (phys_to_mach.rb_node != NULL) {
- pfn = __mfn_to_pfn(mfn);
- if (pfn != INVALID_P2M_ENTRY)
- return pfn;
- }
-
return mfn;
}
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 12969523414c..1f85bfe6b470 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 98544c5f86e9..0e15f011f9c8 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -260,6 +260,12 @@ static int __init xen_guest_init(void)
xen_domain_type = XEN_HVM_DOMAIN;
xen_setup_features();
+
+ if (!xen_feature(XENFEAT_grant_map_identity)) {
+ pr_warn("Please upgrade your Xen.\n"
+ "If your platform has any non-coherent DMA devices, they won't work properly.\n");
+ }
+
if (xen_feature(XENFEAT_dom0))
xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
else
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
new file mode 100644
index 000000000000..3b99860fd7ae
--- /dev/null
+++ b/arch/arm/xen/mm32.c
@@ -0,0 +1,202 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+#include <xen/features.h>
+
+static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
+static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
+
+static int alloc_xen_mm32_scratch_page(int cpu)
+{
+ struct page *page;
+ unsigned long virt;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
+ return 0;
+
+ page = alloc_page(GFP_KERNEL);
+ if (page == NULL) {
+ pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
+ return -ENOMEM;
+ }
+
+ virt = (unsigned long)__va(page_to_phys(page));
+ pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+ ptep = pte_offset_kernel(pmdp, virt);
+
+ per_cpu(xen_mm32_scratch_virt, cpu) = virt;
+ per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
+
+ return 0;
+}
+
+static int xen_mm32_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (long)hcpu;
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (alloc_xen_mm32_scratch_page(cpu))
+ return NOTIFY_BAD;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block xen_mm32_cpu_notifier = {
+ .notifier_call = xen_mm32_cpu_notify,
+};
+
+static void* xen_mm32_remap_page(dma_addr_t handle)
+{
+ unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
+ pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
+
+ *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
+ local_flush_tlb_kernel_page(virt);
+
+ return (void*)virt;
+}
+
+static void xen_mm32_unmap(void *vaddr)
+{
+ put_cpu_var(xen_mm32_scratch_virt);
+}
+
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ void (*op)(const void *, size_t, int))
+{
+ unsigned long pfn;
+ size_t left = size;
+
+ pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+
+ do {
+ size_t len = left;
+ void *vaddr;
+
+ if (!pfn_valid(pfn))
+ {
+ /* Cannot map the page, we don't know its physical address.
+ * Return and hope for the best */
+ if (!xen_feature(XENFEAT_grant_map_identity))
+ return;
+ vaddr = xen_mm32_remap_page(handle) + offset;
+ op(vaddr, len, dir);
+ xen_mm32_unmap(vaddr - offset);
+ } else {
+ struct page *page = pfn_to_page(pfn);
+
+ if (PageHighMem(page)) {
+ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+
+ if (cache_is_vipt_nonaliasing()) {
+ vaddr = kmap_atomic(page);
+ op(vaddr + offset, len, dir);
+ kunmap_atomic(vaddr);
+ } else {
+ vaddr = kmap_high_get(page);
+ if (vaddr) {
+ op(vaddr + offset, len, dir);
+ kunmap_high(page);
+ }
+ }
+ } else {
+ vaddr = page_address(page) + offset;
+ op(vaddr, len, dir);
+ }
+ }
+
+ offset = 0;
+ pfn++;
+ left -= len;
+ } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ /* Cannot use __dma_page_dev_to_cpu because we don't have a
+ * struct page for handle */
+
+ if (dir != DMA_TO_DEVICE)
+ outer_inv_range(handle, handle + size);
+
+ dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+
+ dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
+
+ if (dir == DMA_FROM_DEVICE) {
+ outer_inv_range(handle, handle + size);
+ } else {
+ outer_clean_range(handle, handle + size);
+ }
+}
+
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+
+{
+ if (!__generic_dma_ops(hwdev)->unmap_page)
+ return;
+ if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ return;
+
+ __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
+ return;
+ __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__generic_dma_ops(hwdev)->sync_single_for_device)
+ return;
+ __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+int __init xen_mm32_init(void)
+{
+ int cpu;
+
+ if (!xen_initial_domain())
+ return 0;
+
+ register_cpu_notifier(&xen_mm32_cpu_notifier);
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (alloc_xen_mm32_scratch_page(cpu)) {
+ put_online_cpus();
+ unregister_cpu_notifier(&xen_mm32_cpu_notifier);
+ return -ENOMEM;
+ }
+ }
+ put_online_cpus();
+
+ return 0;
+}
+arch_initcall(xen_mm32_init);
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 97baf4427817..054857776254 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -21,14 +21,12 @@ struct xen_p2m_entry {
unsigned long pfn;
unsigned long mfn;
unsigned long nr_pages;
- struct rb_node rbnode_mach;
struct rb_node rbnode_phys;
};
static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
EXPORT_SYMBOL_GPL(phys_to_mach);
-static struct rb_root mach_to_phys = RB_ROOT;
static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
parent = *link;
entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
- if (new->mfn == entry->mfn)
- goto err_out;
if (new->pfn == entry->pfn)
goto err_out;
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn)
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);
-static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
-{
- struct rb_node **link = &mach_to_phys.rb_node;
- struct rb_node *parent = NULL;
- struct xen_p2m_entry *entry;
- int rc = 0;
-
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
-
- if (new->mfn == entry->mfn)
- goto err_out;
- if (new->pfn == entry->pfn)
- goto err_out;
-
- if (new->mfn < entry->mfn)
- link = &(*link)->rb_left;
- else
- link = &(*link)->rb_right;
- }
- rb_link_node(&new->rbnode_mach, parent, link);
- rb_insert_color(&new->rbnode_mach, &mach_to_phys);
- goto out;
-
-err_out:
- rc = -EINVAL;
- pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
- __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
-out:
- return rc;
-}
-
-unsigned long __mfn_to_pfn(unsigned long mfn)
-{
- struct rb_node *n = mach_to_phys.rb_node;
- struct xen_p2m_entry *entry;
- unsigned long irqflags;
-
- read_lock_irqsave(&p2m_lock, irqflags);
- while (n) {
- entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
- if (entry->mfn <= mfn &&
- entry->mfn + entry->nr_pages > mfn) {
- read_unlock_irqrestore(&p2m_lock, irqflags);
- return entry->pfn + (mfn - entry->mfn);
- }
- if (mfn < entry->mfn)
- n = n->rb_left;
- else
- n = n->rb_right;
- }
- read_unlock_irqrestore(&p2m_lock, irqflags);
-
- return INVALID_P2M_ENTRY;
-}
-EXPORT_SYMBOL_GPL(__mfn_to_pfn);
-
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
if (p2m_entry->pfn <= pfn &&
p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
- rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
write_unlock_irqrestore(&p2m_lock, irqflags);
kfree(p2m_entry);
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
p2m_entry->mfn = mfn;
write_lock_irqsave(&p2m_lock, irqflags);
- if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
- (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+ if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) {
write_unlock_irqrestore(&p2m_lock, irqflags);
return false;
}
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0f08dfd69ebc..dfa6e3e74fdd 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
return false;
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
ret = true;
+ }
- /*
- * when using forced irq_set_affinity we must ensure that the cpu
- * being offlined is not present in the affinity mask, it may be
- * selected as the target CPU otherwise
- */
- affinity = cpu_online_mask;
c = irq_data_get_irq_chip(d);
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+ else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
cpumask_copy(d->affinity, affinity);
return ret;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1309d64aa926..29d48690f2ac 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -230,9 +230,27 @@ void exit_thread(void)
{
}
+static void tls_thread_flush(void)
+{
+ asm ("msr tpidr_el0, xzr");
+
+ if (is_compat_task()) {
+ current->thread.tp_value = 0;
+
+ /*
+ * We need to ensure ordering between the shadow state and the
+ * hardware state, so that we don't corrupt the hardware state
+ * with a stale shadow state during context switch.
+ */
+ barrier();
+ asm ("msr tpidrro_el0, xzr");
+ }
+}
+
void flush_thread(void)
{
fpsimd_flush_thread();
+ tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
}
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index de2b0226e06d..dc47e53e9e28 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
case __ARM_NR_compat_set_tls:
current->thread.tp_value = regs->regs[0];
+
+ /*
+ * Protect against register corruption from context switch.
+ * See comment in tls_thread_flush.
+ */
+ barrier();
asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
return 0;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 40e1c1dd0e24..6feded3b0c4c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -127,7 +127,7 @@ config SECCOMP
endmenu
-menu "Advanced setup"
+menu "Kernel features"
config ADVANCED_OPTIONS
bool "Prompt for advanced kernel configuration options"
@@ -248,10 +248,10 @@ config MICROBLAZE_64K_PAGES
endchoice
-endmenu
-
source "mm/Kconfig"
+endmenu
+
menu "Executable file formats"
source "fs/Kconfig.binfmt"
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index b4a4cb150aa9..596e485ae707 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -15,6 +15,7 @@
#include <asm/percpu.h>
#include <asm/ptrace.h>
+#include <linux/linkage.h>
/*
* These are per-cpu variables required in entry.S, among other
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 0aa005703a0b..59a89a64a865 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -98,13 +98,13 @@ static inline int access_ok(int type, const void __user *addr,
if ((get_fs().seg < ((unsigned long)addr)) ||
(get_fs().seg < ((unsigned long)addr + size - 1))) {
- pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+ pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
(u32)get_fs().seg);
return 0;
}
ok:
- pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+ pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
(u32)get_fs().seg);
return 1;
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index fd56a8f66489..ea4b233647c1 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
#endif /* __ASSEMBLY__ */
-#define __NR_syscalls 381
+#define __NR_syscalls 387
#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 6e75e2030927..1554a6f2a5bb 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -321,6 +321,22 @@ source "fs/Kconfig"
source "arch/parisc/Kconfig.debug"
+config SECCOMP
+ def_bool y
+ prompt "Enable seccomp to safely compute untrusted bytecode"
+ ---help---
+ This kernel feature is useful for number crunching applications
+ that may need to compute untrusted bytecode during their
+ execution. By using pipes or other transports made available to
+ the process as file descriptors supporting the read/write
+ syscalls, it's possible to isolate those applications in
+ their own address space using seccomp. Once seccomp is
+ enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+ and the task is only allowed to execute a few safe syscalls
+ defined by each seccomp mode.
+
+ If unsure, say Y. Only embedded should say N here.
+
source "security/Kconfig"
source "crypto/Kconfig"
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index d9dc6cd3b7d2..e5c4da035810 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -456,7 +456,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
}
/* String could be altered by userspace after strlen_user() */
- fsname[len] = '\0';
+ fsname[len - 1] = '\0';
printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname);
if ( !strcmp(fsname, "hfs") ) {
diff --git a/arch/parisc/include/asm/seccomp.h b/arch/parisc/include/asm/seccomp.h
new file mode 100644
index 000000000000..015f7887aa29
--- /dev/null
+++ b/arch/parisc/include/asm/seccomp.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_PARISC_SECCOMP_H
+#define _ASM_PARISC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+
+#endif /* _ASM_PARISC_SECCOMP_H */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 4b9b10ce1f9d..a84611835549 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -60,6 +60,7 @@ struct thread_info {
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define TIF_SINGLESTEP 9 /* single stepping? */
#define TIF_BLOCKSTEP 10 /* branch stepping? */
+#define TIF_SECCOMP 11 /* secure computing */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -70,11 +71,13 @@ struct thread_info {
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
_TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
- _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT)
+ _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP)
#ifdef CONFIG_64BIT
# ifdef CONFIG_COMPAT
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 47e0e21d2272..8667f18be238 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -830,8 +830,11 @@
#define __NR_sched_getattr (__NR_Linux + 335)
#define __NR_utimes (__NR_Linux + 336)
#define __NR_renameat2 (__NR_Linux + 337)
+#define __NR_seccomp (__NR_Linux + 338)
+#define __NR_getrandom (__NR_Linux + 339)
+#define __NR_memfd_create (__NR_Linux + 340)
-#define __NR_Linux_syscalls (__NR_renameat2 + 1)
+#define __NR_Linux_syscalls (__NR_memfd_create + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index e842ee233db4..3bab72462ab5 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -270,6 +270,12 @@ long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
+ /* Do the secure computing check first. */
+ if (secure_computing(regs->gr[20])) {
+ /* seccomp failures shouldn't expose any additional code. */
+ return -1;
+ }
+
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
ret = -1L;
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 838786011037..7ef22e3387e0 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -74,7 +74,7 @@ ENTRY(linux_gateway_page)
/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
/* Light-weight-syscall entry must always be located at 0xb0 */
/* WARNING: Keep this number updated with table size changes */
-#define __NR_lws_entries (2)
+#define __NR_lws_entries (3)
lws_entry:
gate lws_start, %r0 /* increase privilege */
@@ -502,7 +502,7 @@ lws_exit:
/***************************************************
- Implementing CAS as an atomic operation:
+ Implementing 32bit CAS as an atomic operation:
%r26 - Address to examine
%r25 - Old value to check (old)
@@ -659,6 +659,230 @@ cas_action:
ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
+ /***************************************************
+ New CAS implementation which uses pointers and variable size
+ information. The values pointed to by old and new MUST NOT change
+ while the CAS is performed. The lock only protects the value at %r26.
+
+ %r26 - Address to examine
+ %r25 - Pointer to the value to check (old)
+ %r24 - Pointer to the value to set (new)
+ %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
+ %r28 - Return non-zero on failure
+ %r21 - Kernel error code
+
+ %r21 has the following meanings:
+
+ EAGAIN - CAS is busy, ldcw failed, try again.
+ EFAULT - Read or write failed.
+
+ Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
+
+ ****************************************************/
+
+ /* ELF32 Process entry path */
+lws_compare_and_swap_2:
+#ifdef CONFIG_64BIT
+ /* Clip the input registers */
+ depdi 0, 31, 32, %r26
+ depdi 0, 31, 32, %r25
+ depdi 0, 31, 32, %r24
+ depdi 0, 31, 32, %r23
+#endif
+
+ /* Check the validity of the size pointer */
+ subi,>>= 4, %r23, %r0
+ b,n lws_exit_nosys
+
+ /* Jump to the functions which will load the old and new values into
+ registers depending on the their size */
+ shlw %r23, 2, %r29
+ blr %r29, %r0
+ nop
+
+ /* 8bit load */
+4: ldb 0(%sr3,%r25), %r25
+ b cas2_lock_start
+5: ldb 0(%sr3,%r24), %r24
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ /* 16bit load */
+6: ldh 0(%sr3,%r25), %r25
+ b cas2_lock_start
+7: ldh 0(%sr3,%r24), %r24
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ /* 32bit load */
+8: ldw 0(%sr3,%r25), %r25
+ b cas2_lock_start
+9: ldw 0(%sr3,%r24), %r24
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ /* 64bit load */
+#ifdef CONFIG_64BIT
+10: ldd 0(%sr3,%r25), %r25
+11: ldd 0(%sr3,%r24), %r24
+#else
+ /* Load new value into r22/r23 - high/low */
+10: ldw 0(%sr3,%r25), %r22
+11: ldw 4(%sr3,%r25), %r23
+ /* Load new value into fr4 for atomic store later */
+12: flddx 0(%sr3,%r24), %fr4
+#endif
+
+cas2_lock_start:
+ /* Load start of lock table */
+ ldil L%lws_lock_start, %r20
+ ldo R%lws_lock_start(%r20), %r28
+
+ /* Extract four bits from r26 and hash lock (Bits 4-7) */
+ extru %r26, 27, 4, %r20
+
+ /* Find lock to use, the hash is either one of 0 to
+ 15, multiplied by 16 (keep it 16-byte aligned)
+ and add to the lock table offset. */
+ shlw %r20, 4, %r20
+ add %r20, %r28, %r20
+
+ rsm PSW_SM_I, %r0 /* Disable interrupts */
+ /* COW breaks can cause contention on UP systems */
+ LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
+ cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */
+cas2_wouldblock:
+ ldo 2(%r0), %r28 /* 2nd case */
+ ssm PSW_SM_I, %r0
+ b lws_exit /* Contended... */
+ ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
+
+ /*
+ prev = *addr;
+ if ( prev == old )
+ *addr = new;
+ return prev;
+ */
+
+ /* NOTES:
+ This all works because intr_do_signal
+ and schedule both check the return iasq
+ and see that we are on the kernel page
+ so this process is never scheduled off
+ or is ever sent any signal of any sort,
+ thus it is wholly atomic from userspace's
+ perspective
+ */
+cas2_action:
+ /* Jump to the correct function */
+ blr %r29, %r0
+ /* Set %r28 as non-zero for now */
+ ldo 1(%r0),%r28
+
+ /* 8bit CAS */
+13: ldb,ma 0(%sr3,%r26), %r29
+ sub,= %r29, %r25, %r0
+ b,n cas2_end
+14: stb,ma %r24, 0(%sr3,%r26)
+ b cas2_end
+ copy %r0, %r28
+ nop
+ nop
+
+ /* 16bit CAS */
+15: ldh,ma 0(%sr3,%r26), %r29
+ sub,= %r29, %r25, %r0
+ b,n cas2_end
+16: sth,ma %r24, 0(%sr3,%r26)
+ b cas2_end
+ copy %r0, %r28
+ nop
+ nop
+
+ /* 32bit CAS */
+17: ldw,ma 0(%sr3,%r26), %r29
+ sub,= %r29, %r25, %r0
+ b,n cas2_end
+18: stw,ma %r24, 0(%sr3,%r26)
+ b cas2_end
+ copy %r0, %r28
+ nop
+ nop
+
+ /* 64bit CAS */
+#ifdef CONFIG_64BIT
+19: ldd,ma 0(%sr3,%r26), %r29
+ sub,= %r29, %r25, %r0
+ b,n cas2_end
+20: std,ma %r24, 0(%sr3,%r26)
+ copy %r0, %r28
+#else
+ /* Compare first word */
+19: ldw,ma 0(%sr3,%r26), %r29
+ sub,= %r29, %r22, %r0
+ b,n cas2_end
+ /* Compare second word */
+20: ldw,ma 4(%sr3,%r26), %r29
+ sub,= %r29, %r23, %r0
+ b,n cas2_end
+ /* Perform the store */
+21: fstdx %fr4, 0(%sr3,%r26)
+ copy %r0, %r28
+#endif
+
+cas2_end:
+ /* Free lock */
+ stw,ma %r20, 0(%sr2,%r20)
+ /* Enable interrupts */
+ ssm PSW_SM_I, %r0
+ /* Return to userspace, set no error */
+ b lws_exit
+ copy %r0, %r21
+
+22:
+ /* Error occurred on load or store */
+ /* Free lock */
+ stw %r20, 0(%sr2,%r20)
+ ssm PSW_SM_I, %r0
+ ldo 1(%r0),%r28
+ b lws_exit
+ ldo -EFAULT(%r0),%r21 /* set errno */
+ nop
+ nop
+ nop
+
+ /* Exception table entries, for the load and store, return EFAULT.
+ Each of the entries must be relocated. */
+ ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
+#ifndef CONFIG_64BIT
+ ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
+#endif
+
/* Make sure nothing else is placed on this page */
.align PAGE_SIZE
END(linux_gateway_page)
@@ -675,8 +899,9 @@ ENTRY(end_linux_gateway_page)
/* Light-weight-syscall table */
/* Start of lws table. */
ENTRY(lws_table)
- LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
- LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
+ LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */
+ LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */
+ LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */
END(lws_table)
/* End of lws table */
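
In C terms, the new lws_compare_and_swap_2 entry implements the operation
described in the comment block above for 8/16/32/64-bit operands. The sketch
below covers only the 32-bit case and is illustrative: userspace actually
reaches the operation through the gateway page, and %r21 additionally reports
-EAGAIN or -EFAULT.

    #include <stdint.h>

    /* %r26 = addr, %r25 -> old, %r24 -> new; the return value models %r28. */
    static int lws_cas2_u32(uint32_t *addr, const uint32_t *old,
                            const uint32_t *new)
    {
            if (*addr != *old)
                    return 1;       /* non-zero: comparison failed */
            *addr = *new;
            return 0;               /* zero: new value was stored */
    }
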
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 84c5d3a58fa1..b563d9c8268b 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -433,6 +433,9 @@
ENTRY_SAME(sched_getattr) /* 335 */
ENTRY_COMP(utimes)
ENTRY_SAME(renameat2)
+ ENTRY_SAME(seccomp)
+ ENTRY_SAME(getrandom)
+ ENTRY_SAME(memfd_create) /* 340 */
/* Nothing yet */
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 4bee1a6d41d0..45fd06cdc3e8 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -5,6 +5,7 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig
index 6d7b22f41b50..77d7bf3ca2ac 100644
--- a/arch/powerpc/configs/celleb_defconfig
+++ b/arch/powerpc/configs/celleb_defconfig
@@ -5,6 +5,7 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 4b07bade1ba9..269d6e47c67d 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -4,6 +4,7 @@ CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=24
CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 3c72fa615bd9..7594c5ac6481 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -5,6 +5,7 @@ CONFIG_NR_CPUS=4
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 95e545d9f25c..c8b6a9ddb21b 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -4,6 +4,7 @@ CONFIG_NR_CPUS=4
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index cec044a3ff69..e5e7838af008 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -3,6 +3,7 @@ CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index f26b267eb71f..f6c02f8cdc62 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -4,6 +4,7 @@ CONFIG_VSX=y
CONFIG_SMP=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 438e813dc9cb..587f5514f9b1 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -3,6 +3,7 @@ CONFIG_PPC_BOOK3E_64=y
CONFIG_SMP=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index fdee37fab81c..2e637c881d2b 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -5,6 +5,7 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_LZMA=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index a905063281cc..50375f1f59e7 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -5,6 +5,7 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=2048
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_AUDITSYSCALL=y
CONFIG_IRQ_DOMAIN_DEBUG=y
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig
index 58e3dbf43ca4..4428ee428f4e 100644
--- a/arch/powerpc/configs/pseries_le_defconfig
+++ b/arch/powerpc/configs/pseries_le_defconfig
@@ -6,6 +6,7 @@ CONFIG_NR_CPUS=2048
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_AUDITSYSCALL=y
CONFIG_IRQ_DOMAIN_DEBUG=y
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 279b80f3bb29..c0c61fa9cd9e 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -47,6 +47,12 @@
STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STACK_FRAME_MIN_SIZE 32
+#else
+#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
+#endif
+
/* Size of dummy stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 128
#define __SIGNAL_FRAMESIZE32 64
@@ -60,6 +66,7 @@
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_FRAME_MARKER 2
+#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 64
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 542bc0f0673f..7d8a60068805 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -362,3 +362,6 @@ SYSCALL(ni_syscall) /* sys_kcmp */
SYSCALL_SPU(sched_setattr)
SYSCALL_SPU(sched_getattr)
SYSCALL_SPU(renameat2)
+SYSCALL_SPU(seccomp)
+SYSCALL_SPU(getrandom)
+SYSCALL_SPU(memfd_create)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 5ce5552ab9f5..4e9af3fd43e7 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 358
+#define __NR_syscalls 361
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 2d526f7b48da..0688fc06e183 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -380,5 +380,8 @@
#define __NR_sched_setattr 355
#define __NR_sched_getattr 356
#define __NR_renameat2 357
+#define __NR_seccomp 358
+#define __NR_getrandom 359
+#define __NR_memfd_create 360
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 74d1e780748b..2396dda282cd 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
return 0; /* must be 16-byte aligned */
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return 0;
- if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
+ if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
return 1;
/*
* sp could decrease when we jump off an interrupt stack
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index 97ac8dc33667..5e1ed1575aab 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -28,6 +28,7 @@
#include <asm/opal.h>
#include <asm/cputable.h>
+#include <asm/machdep.h>
static int opal_hmi_handler_nb_init;
struct OpalHmiEvtNode {
@@ -185,4 +186,4 @@ static int __init opal_hmi_handler_init(void)
}
return 0;
}
-subsys_initcall(opal_hmi_handler_init);
+machine_subsys_initcall(powernv, opal_hmi_handler_init);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index c904583baf4b..17ee193960a0 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -113,7 +113,7 @@ out:
static int pseries_remove_mem_node(struct device_node *np)
{
const char *type;
- const unsigned int *regs;
+ const __be32 *regs;
unsigned long base;
unsigned int lmb_size;
int ret = -EINVAL;
@@ -132,8 +132,8 @@ static int pseries_remove_mem_node(struct device_node *np)
if (!regs)
return ret;
- base = *(unsigned long *)regs;
- lmb_size = regs[3];
+ base = be64_to_cpu(*(unsigned long *)regs);
+ lmb_size = be32_to_cpu(regs[3]);
pseries_remove_memblock(base, lmb_size);
return 0;
@@ -153,7 +153,7 @@ static inline int pseries_remove_mem_node(struct device_node *np)
static int pseries_add_mem_node(struct device_node *np)
{
const char *type;
- const unsigned int *regs;
+ const __be32 *regs;
unsigned long base;
unsigned int lmb_size;
int ret = -EINVAL;
@@ -172,8 +172,8 @@ static int pseries_add_mem_node(struct device_node *np)
if (!regs)
return ret;
- base = *(unsigned long *)regs;
- lmb_size = regs[3];
+ base = be64_to_cpu(*(unsigned long *)regs);
+ lmb_size = be32_to_cpu(regs[3]);
/*
* Update memory region to represent the memory add
@@ -187,14 +187,14 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
struct of_drconf_cell *new_drmem, *old_drmem;
unsigned long memblock_size;
u32 entries;
- u32 *p;
+ __be32 *p;
int i, rc = -EINVAL;
memblock_size = pseries_memory_block_size();
if (!memblock_size)
return -EINVAL;
- p = (u32 *) pr->old_prop->value;
+ p = (__be32 *) pr->old_prop->value;
if (!p)
return -EINVAL;
@@ -203,28 +203,30 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
* entries. Get the niumber of entries and skip to the array of
* of_drconf_cell's.
*/
- entries = *p++;
+ entries = be32_to_cpu(*p++);
old_drmem = (struct of_drconf_cell *)p;
- p = (u32 *)pr->prop->value;
+ p = (__be32 *)pr->prop->value;
p++;
new_drmem = (struct of_drconf_cell *)p;
for (i = 0; i < entries; i++) {
- if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) &&
- (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) {
- rc = pseries_remove_memblock(old_drmem[i].base_addr,
+ if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
+ (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
+ rc = pseries_remove_memblock(
+ be64_to_cpu(old_drmem[i].base_addr),
memblock_size);
break;
- } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) &&
- (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) {
- rc = memblock_add(old_drmem[i].base_addr,
+ } else if ((!(be32_to_cpu(old_drmem[i].flags) &
+ DRCONF_MEM_ASSIGNED)) &&
+ (be32_to_cpu(new_drmem[i].flags) &
+ DRCONF_MEM_ASSIGNED)) {
+ rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
memblock_size);
rc = (rc < 0) ? -EINVAL : 0;
break;
}
}
-
return rc;
}
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 2fcccc0c997c..c81661e756a0 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -17,12 +17,12 @@
#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
sizeof(struct ipl_block_fcp))
-#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 8)
+#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
sizeof(struct ipl_block_ccw))
-#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 8)
+#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
#define IPL_MAX_SUPPORTED_VERSION (0)
@@ -38,10 +38,11 @@ struct ipl_list_hdr {
u8 pbt;
u8 flags;
u16 reserved2;
+ u8 loadparm[8];
} __attribute__((packed));
struct ipl_block_fcp {
- u8 reserved1[313-1];
+ u8 reserved1[305-1];
u8 opt;
u8 reserved2[3];
u16 reserved3;
@@ -62,7 +63,6 @@ struct ipl_block_fcp {
offsetof(struct ipl_block_fcp, scp_data)))
struct ipl_block_ccw {
- u8 load_parm[8];
u8 reserved1[84];
u8 reserved2[2];
u16 devno;
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 22aac5885ba2..39badb9ca0b3 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -455,22 +455,6 @@ DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long)
DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long)
IPL_PARMBLOCK_START->ipl_info.fcp.br_lba);
-static struct attribute *ipl_fcp_attrs[] = {
- &sys_ipl_type_attr.attr,
- &sys_ipl_device_attr.attr,
- &sys_ipl_fcp_wwpn_attr.attr,
- &sys_ipl_fcp_lun_attr.attr,
- &sys_ipl_fcp_bootprog_attr.attr,
- &sys_ipl_fcp_br_lba_attr.attr,
- NULL,
-};
-
-static struct attribute_group ipl_fcp_attr_group = {
- .attrs = ipl_fcp_attrs,
-};
-
-/* CCW ipl device attributes */
-
static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
@@ -487,6 +471,23 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
+static struct attribute *ipl_fcp_attrs[] = {
+ &sys_ipl_type_attr.attr,
+ &sys_ipl_device_attr.attr,
+ &sys_ipl_fcp_wwpn_attr.attr,
+ &sys_ipl_fcp_lun_attr.attr,
+ &sys_ipl_fcp_bootprog_attr.attr,
+ &sys_ipl_fcp_br_lba_attr.attr,
+ &sys_ipl_ccw_loadparm_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ipl_fcp_attr_group = {
+ .attrs = ipl_fcp_attrs,
+};
+
+/* CCW ipl device attributes */
+
static struct attribute *ipl_ccw_attrs_vm[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
@@ -765,28 +766,10 @@ DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
reipl_block_fcp->ipl_info.fcp.devno);
-static struct attribute *reipl_fcp_attrs[] = {
- &sys_reipl_fcp_device_attr.attr,
- &sys_reipl_fcp_wwpn_attr.attr,
- &sys_reipl_fcp_lun_attr.attr,
- &sys_reipl_fcp_bootprog_attr.attr,
- &sys_reipl_fcp_br_lba_attr.attr,
- NULL,
-};
-
-static struct attribute_group reipl_fcp_attr_group = {
- .attrs = reipl_fcp_attrs,
-};
-
-/* CCW reipl device attributes */
-
-DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
- reipl_block_ccw->ipl_info.ccw.devno);
-
static void reipl_get_ascii_loadparm(char *loadparm,
struct ipl_parameter_block *ibp)
{
- memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
+ memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
loadparm[LOADPARM_LEN] = 0;
strim(loadparm);
@@ -821,13 +804,50 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
return -EINVAL;
}
/* initialize loadparm with blanks */
- memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN);
+ memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN);
/* copy and convert to ebcdic */
- memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len);
- ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN);
+ memcpy(ipb->hdr.loadparm, buf, lp_len);
+ ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
return len;
}
+/* FCP wrapper */
+static ssize_t reipl_fcp_loadparm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return reipl_generic_loadparm_show(reipl_block_fcp, page);
+}
+
+static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return reipl_generic_loadparm_store(reipl_block_fcp, buf, len);
+}
+
+static struct kobj_attribute sys_reipl_fcp_loadparm_attr =
+ __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show,
+ reipl_fcp_loadparm_store);
+
+static struct attribute *reipl_fcp_attrs[] = {
+ &sys_reipl_fcp_device_attr.attr,
+ &sys_reipl_fcp_wwpn_attr.attr,
+ &sys_reipl_fcp_lun_attr.attr,
+ &sys_reipl_fcp_bootprog_attr.attr,
+ &sys_reipl_fcp_br_lba_attr.attr,
+ &sys_reipl_fcp_loadparm_attr.attr,
+ NULL,
+};
+
+static struct attribute_group reipl_fcp_attr_group = {
+ .attrs = reipl_fcp_attrs,
+};
+
+/* CCW reipl device attributes */
+
+DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+ reipl_block_ccw->ipl_info.ccw.devno);
+
/* NSS wrapper */
static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
@@ -1125,11 +1145,10 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
/* LOADPARM */
/* check if read scp info worked and set loadparm */
if (sclp_ipl_info.is_valid)
- memcpy(ipb->ipl_info.ccw.load_parm,
- &sclp_ipl_info.loadparm, LOADPARM_LEN);
+ memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
else
/* read scp info failed: set empty loadparm (EBCDIC blanks) */
- memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN);
+ memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN);
ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
/* VM PARM */
@@ -1251,9 +1270,16 @@ static int __init reipl_fcp_init(void)
return rc;
}
- if (ipl_info.type == IPL_TYPE_FCP)
+ if (ipl_info.type == IPL_TYPE_FCP) {
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
- else {
+ /*
+ * Fix loadparm: There are systems where the (SCSI) LOADPARM
+ * is invalid in the SCSI IPL parameter block, so always take it
+ * from sclp_ipl_info.
+ */
+ memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm,
+ LOADPARM_LEN);
+ } else {
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
@@ -1864,7 +1890,23 @@ static void __init shutdown_actions_init(void)
static int __init s390_ipl_init(void)
{
+ char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40};
+
sclp_get_ipl_info(&sclp_ipl_info);
+ /*
+ * Fix loadparm: There are systems where the (SCSI) LOADPARM
+ * returned by read SCP info is invalid (contains EBCDIC blanks)
+ * when the system has been booted via diag308. In that case we use
+ * the value from diag308, if available.
+ *
+ * There are also systems where diag308 store does not work when
+ * the system is booted from HMC. Fortunately, in this case
+ * READ SCP info provides the correct value.
+ */
+ if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 &&
+ diag308_set_works)
+ memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm,
+ LOADPARM_LEN);
shutdown_actions_init();
shutdown_triggers_init();
return 0;
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 65fc3979c2f1..7cf18f8d4cb4 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -22,13 +22,11 @@ __kernel_clock_gettime:
basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
chi %r2,__CLOCK_REALTIME
- je 10f
+ je 11f
chi %r2,__CLOCK_MONOTONIC
jne 19f
/* CLOCK_MONOTONIC */
- ltr %r3,%r3
- jz 9f /* tp == NULL */
1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
@@ -67,12 +65,10 @@ __kernel_clock_gettime:
j 6b
8: st %r2,0(%r3) /* store tp->tv_sec */
st %r1,4(%r3) /* store tp->tv_nsec */
-9: lhi %r2,0
+ lhi %r2,0
br %r14
/* CLOCK_REALTIME */
-10: ltr %r3,%r3 /* tp == NULL */
- jz 18f
11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 11b
@@ -111,7 +107,7 @@ __kernel_clock_gettime:
j 15b
17: st %r2,0(%r3) /* store tp->tv_sec */
st %r1,4(%r3) /* store tp->tv_nsec */
-18: lhi %r2,0
+ lhi %r2,0
br %r14
/* Fallback to system call */
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 91940ed33a4a..3f34e09db5f4 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -21,7 +21,7 @@ __kernel_clock_gettime:
.cfi_startproc
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME
- je 4f
+ je 5f
cghi %r2,__CLOCK_THREAD_CPUTIME_ID
je 9f
cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
@@ -30,8 +30,6 @@ __kernel_clock_gettime:
jne 12f
/* CLOCK_MONOTONIC */
- ltgr %r3,%r3
- jz 3f /* tp == NULL */
0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
@@ -53,12 +51,10 @@ __kernel_clock_gettime:
j 1b
2: stg %r0,0(%r3) /* store tp->tv_sec */
stg %r1,8(%r3) /* store tp->tv_nsec */
-3: lghi %r2,0
+ lghi %r2,0
br %r14
/* CLOCK_REALTIME */
-4: ltr %r3,%r3 /* tp == NULL */
- jz 8f
5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
@@ -80,7 +76,7 @@ __kernel_clock_gettime:
j 6b
7: stg %r0,0(%r3) /* store tp->tv_sec */
stg %r1,8(%r3) /* store tp->tv_nsec */
-8: lghi %r2,0
+ lghi %r2,0
br %r14
/* CLOCK_THREAD_CPUTIME_ID for this thread */
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index bf8daf9d9c9b..37458f38b220 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -105,6 +105,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
get_page(page);
+ __flush_anon_page(page, addr);
+ flush_dcache_page(page);
pages[*nr] = page;
(*nr)++;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 778178f4c7d1..36327438caf0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -23,6 +23,7 @@ config X86
def_bool y
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ARCH_HAS_FAST_MULTIPLIER
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select HAVE_AOUT if X86_32
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index afcd35d331de..cfe3b954d5e4 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -497,8 +497,6 @@ static __always_inline int fls64(__u64 x)
#include <asm-generic/bitops/sched.h>
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 5be9063545d2..3874693c0e53 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
+extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_level4_pgt[];
#define swapper_pg_dir init_level4_pgt
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e8a1201c3293..16fb0099b7f2 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
*
* We can construct this by grafting the Xen provided pagetable into
* head_64.S's preconstructed pagetables. We copy the Xen L2's into
- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
- * means that only the kernel has a physical mapping to start with -
- * but that's enough to get __va working. We need to fill in the rest
- * of the physical mapping once some sort of allocator has been set
- * up.
- * NOTE: for PVH, the page tables are native.
+ * level2_ident_pgt, and level2_kernel_pgt. This means that only the
+ * kernel has a physical mapping to start with - but that's enough to
+ * get __va working. We need to fill in the rest of the physical
+ * mapping once some sort of allocator has been set up. NOTE: for
+ * PVH, the page tables are native.
*/
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
@@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* L3_i[0] -> level2_ident_pgt */
convert_pfn_mfn(level3_ident_pgt);
/* L3_k[510] -> level2_kernel_pgt
- * L3_i[511] -> level2_fixmap_pgt */
+ * L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
+
+ /* L3_k[511][506] -> level1_fixmap_pgt */
+ convert_pfn_mfn(level2_fixmap_pgt);
}
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
@@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
addr[1] = (unsigned long)l3;
addr[2] = (unsigned long)l2;
/* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
- * Both L4[272][0] and L4[511][511] have entries that point to the same
+ * Both L4[272][0] and L4[511][510] have entries that point to the same
* L2 (PMD) tables. Meaning that if you modify it in __va space
* it will also be modified in the __ka space! (But if you just
* modify the PMD table to point to other PTE's or none, then you
* are OK - which is what cleanup_highmap does) */
copy_page(level2_ident_pgt, l2);
- /* Graft it onto L4[511][511] */
+ /* Graft it onto L4[511][510] */
copy_page(level2_kernel_pgt, l2);
- /* Get [511][510] and graft that in level2_fixmap_pgt */
- l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
- l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
- copy_page(level2_fixmap_pgt, l2);
- /* Note that we don't do anything with level1_fixmap_pgt which
- * we don't need. */
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
/* Make pagetable pieces RO */
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
@@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 54535831f1e1..77881798f793 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -10,10 +10,11 @@
#include "blk.h"
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
- struct bio *bio)
+ struct bio *bio,
+ bool no_sg_merge)
{
struct bio_vec bv, bvprv = { NULL };
- int cluster, high, highprv = 1, no_sg_merge;
+ int cluster, high, highprv = 1;
unsigned int seg_size, nr_phys_segs;
struct bio *fbio, *bbio;
struct bvec_iter iter;
@@ -35,7 +36,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
cluster = blk_queue_cluster(q);
seg_size = 0;
nr_phys_segs = 0;
- no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
high = 0;
for_each_bio(bio) {
bio_for_each_segment(bv, bio, iter) {
@@ -88,18 +88,23 @@ new_segment:
void blk_recalc_rq_segments(struct request *rq)
{
- rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
+ bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
+ &rq->q->queue_flags);
+
+ rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
+ no_sg_merge);
}
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
- if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+ if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+ bio->bi_vcnt < queue_max_segments(q))
bio->bi_phys_segments = bio->bi_vcnt;
else {
struct bio *nxt = bio->bi_next;
bio->bi_next = NULL;
- bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+ bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
bio->bi_next = nxt;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4aac82615a46..383ea0cb1f0a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1321,6 +1321,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
continue;
set->ops->exit_request(set->driver_data, tags->rqs[i],
hctx_idx, i);
+ tags->rqs[i] = NULL;
}
}
@@ -1354,8 +1355,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
INIT_LIST_HEAD(&tags->page_list);
- tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
- GFP_KERNEL, set->numa_node);
+ tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ set->numa_node);
if (!tags->rqs) {
blk_mq_free_tags(tags);
return NULL;
@@ -1379,8 +1381,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
this_order--;
do {
- page = alloc_pages_node(set->numa_node, GFP_KERNEL,
- this_order);
+ page = alloc_pages_node(set->numa_node,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ this_order);
if (page)
break;
if (!this_order--)
@@ -1404,8 +1407,10 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
if (set->ops->init_request) {
if (set->ops->init_request(set->driver_data,
tags->rqs[i], hctx_idx, i,
- set->numa_node))
+ set->numa_node)) {
+ tags->rqs[i] = NULL;
goto fail;
+ }
}
p += rq_size;
@@ -1416,7 +1421,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
return tags;
fail:
- pr_warn("%s: failed to allocate requests\n", __func__);
blk_mq_free_rq_map(set, tags, hctx_idx);
return NULL;
}
@@ -1936,6 +1940,61 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+ int i;
+
+ for (i = 0; i < set->nr_hw_queues; i++) {
+ set->tags[i] = blk_mq_init_rq_map(set, i);
+ if (!set->tags[i])
+ goto out_unwind;
+ }
+
+ return 0;
+
+out_unwind:
+ while (--i >= 0)
+ blk_mq_free_rq_map(set, set->tags[i], i);
+
+ set->tags = NULL;
+ return -ENOMEM;
+}
+
+/*
+ * Allocate the request maps associated with this tag_set. Note that this
+ * may reduce the depth asked for, if memory is tight. set->queue_depth
+ * will be updated to reflect the allocated depth.
+ */
+static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+ unsigned int depth;
+ int err;
+
+ depth = set->queue_depth;
+ do {
+ err = __blk_mq_alloc_rq_maps(set);
+ if (!err)
+ break;
+
+ set->queue_depth >>= 1;
+ if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
+ err = -ENOMEM;
+ break;
+ }
+ } while (set->queue_depth);
+
+ if (!set->queue_depth || err) {
+ pr_err("blk-mq: failed to allocate request map\n");
+ return -ENOMEM;
+ }
+
+ if (depth != set->queue_depth)
+ pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
+ depth, set->queue_depth);
+
+ return 0;
+}
+
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
@@ -1944,8 +2003,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
- int i;
-
if (!set->nr_hw_queues)
return -EINVAL;
if (!set->queue_depth)
@@ -1966,23 +2023,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
if (!set->tags)
- goto out;
+ return -ENOMEM;
- for (i = 0; i < set->nr_hw_queues; i++) {
- set->tags[i] = blk_mq_init_rq_map(set, i);
- if (!set->tags[i])
- goto out_unwind;
- }
+ if (blk_mq_alloc_rq_maps(set))
+ goto enomem;
mutex_init(&set->tag_list_lock);
INIT_LIST_HEAD(&set->tag_list);
return 0;
-
-out_unwind:
- while (--i >= 0)
- blk_mq_free_rq_map(set, set->tags[i], i);
-out:
+enomem:
+ kfree(set->tags);
+ set->tags = NULL;
return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@ -1997,6 +2049,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
}
kfree(set->tags);
+ set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 4db5abf96b9e..17f5c84ce7bf 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -554,8 +554,10 @@ int blk_register_queue(struct gendisk *disk)
* Initialization must be complete by now. Finish the initial
* bypass from queue allocation.
*/
- queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
- blk_queue_bypass_end(q);
+ if (!blk_queue_init_done(q)) {
+ queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+ blk_queue_bypass_end(q);
+ }
ret = blk_trace_init_sysfs(dev);
if (ret)
diff --git a/block/genhd.c b/block/genhd.c
index 791f41943132..09da5e4a8e03 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -28,10 +28,10 @@ struct kobject *block_depr;
/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT (1 << MINORBITS)
-/* For extended devt allocation. ext_devt_mutex prevents look up
+/* For extended devt allocation. ext_devt_lock prevents lookup
* results from going away underneath its user.
*/
-static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_SPINLOCK(ext_devt_lock);
static DEFINE_IDR(ext_devt_idr);
static struct device_type disk_type;
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
}
/* allocate ext devt */
- mutex_lock(&ext_devt_mutex);
- idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
- mutex_unlock(&ext_devt_mutex);
+ idr_preload(GFP_KERNEL);
+
+ spin_lock(&ext_devt_lock);
+ idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+ spin_unlock(&ext_devt_lock);
+
+ idr_preload_end();
if (idx < 0)
return idx == -ENOSPC ? -EBUSY : idx;
@@ -447,9 +451,9 @@ void blk_free_devt(dev_t devt)
return;
if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
- mutex_lock(&ext_devt_mutex);
+ spin_lock(&ext_devt_lock);
idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
- mutex_unlock(&ext_devt_mutex);
+ spin_unlock(&ext_devt_lock);
}
}
@@ -665,7 +669,6 @@ void del_gendisk(struct gendisk *disk)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
device_del(disk_to_dev(disk));
- blk_free_devt(disk_to_dev(disk)->devt);
}
EXPORT_SYMBOL(del_gendisk);
@@ -690,13 +693,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
} else {
struct hd_struct *part;
- mutex_lock(&ext_devt_mutex);
+ spin_lock(&ext_devt_lock);
part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
if (part && get_disk(part_to_disk(part))) {
*partno = part->partno;
disk = part_to_disk(part);
}
- mutex_unlock(&ext_devt_mutex);
+ spin_unlock(&ext_devt_lock);
}
return disk;
@@ -1098,6 +1101,7 @@ static void disk_release(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
+ blk_free_devt(dev->devt);
disk_release_events(disk);
kfree(disk->random);
disk_replace_part_tbl(disk, NULL);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 789cdea05893..0d9e5f97f0a8 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
static void part_release(struct device *dev)
{
struct hd_struct *p = dev_to_part(dev);
+ blk_free_devt(dev->devt);
free_part_stats(p);
free_part_info(p);
kfree(p);
@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno)
rcu_assign_pointer(ptbl->last_lookup, NULL);
kobject_put(part->holder_dir);
device_del(part_to_dev(part));
- blk_free_devt(part_devt(part));
hd_struct_put(part);
}
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
index 2da8660262e5..81dc75033f15 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/acpi_cmos_rtc.c
@@ -33,7 +33,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
void *handler_context, void *region_context)
{
int i;
- u8 *value = (u8 *)&value64;
+ u8 *value = (u8 *)value64;
if (address > 0xff || !value64)
return AE_BAD_PARAMETER;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 9dfec48dd4e5..fddc1e86f9d0 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -610,7 +610,7 @@ static int acpi_lpss_suspend_late(struct device *dev)
return acpi_dev_suspend_late(dev);
}
-static int acpi_lpss_restore_early(struct device *dev)
+static int acpi_lpss_resume_early(struct device *dev)
{
int ret = acpi_dev_resume_early(dev);
@@ -650,15 +650,15 @@ static int acpi_lpss_runtime_resume(struct device *dev)
static struct dev_pm_domain acpi_lpss_pm_domain = {
.ops = {
#ifdef CONFIG_PM_SLEEP
- .suspend_late = acpi_lpss_suspend_late,
- .restore_early = acpi_lpss_restore_early,
.prepare = acpi_subsys_prepare,
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
- .resume_early = acpi_subsys_resume_early,
+ .suspend_late = acpi_lpss_suspend_late,
+ .resume_early = acpi_lpss_resume_early,
.freeze = acpi_subsys_freeze,
.poweroff = acpi_subsys_suspend,
- .poweroff_late = acpi_subsys_suspend_late,
+ .poweroff_late = acpi_lpss_suspend_late,
+ .restore_early = acpi_lpss_resume_early,
#endif
#ifdef CONFIG_PM_RUNTIME
.runtime_suspend = acpi_lpss_runtime_suspend,
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 1c162e7be045..5fdfe65fe165 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -534,20 +534,6 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
" invalid.\n");
}
- /*
- * When fully charged, some batteries wrongly report
- * capacity_now = design_capacity instead of = full_charge_capacity
- */
- if (battery->capacity_now > battery->full_charge_capacity
- && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
- if (battery->capacity_now != battery->design_capacity)
- printk_once(KERN_WARNING FW_BUG
- "battery: reported current charge level (%d) "
- "is higher than reported maximum charge level (%d).\n",
- battery->capacity_now, battery->full_charge_capacity);
- battery->capacity_now = battery->full_charge_capacity;
- }
-
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
battery->capacity_now = (battery->capacity_now *
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index a29f8012fb08..a0cc0edafc78 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -305,6 +305,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -442,6 +450,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9182 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
@@ -1329,6 +1339,18 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
+ /*
+ * The JMicron chip 361/363 contains one SATA controller and one
+ * PATA controller. To power on both controllers, we must follow
+ * the sequence one by one; otherwise one of them cannot be
+ * powered on successfully, so here we disable the async suspend
+ * method for these chips.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
+ (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
+ pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
+ device_disable_async_suspend(&pdev->dev);
+
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index f1fef74e503c..032904402c95 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -18,14 +18,17 @@
*/
#include <linux/ahci_platform.h>
-#include <linux/reset.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
+
#include "ahci.h"
#define SATA_CONFIGURATION_0 0x180
@@ -180,9 +183,12 @@ static int tegra_ahci_controller_init(struct ahci_host_priv *hpriv)
/* Pad calibration */
- /* FIXME Always use calibration 0. Change this to read the calibration
- * fuse once the fuse driver has landed. */
- val = 0;
+ ret = tegra_fuse_readl(FUSE_SATA_CALIB, &val);
+ if (ret) {
+ dev_err(&tegra->pdev->dev,
+ "failed to read calibration fuse: %d\n", ret);
+ return ret;
+ }
calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK];
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index c6962300b93c..f03aab187f4d 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -78,6 +78,9 @@
#define CFG_MEM_RAM_SHUTDOWN 0x00000070
#define BLOCK_MEM_RDY 0x00000074
+/* Max retry for link down */
+#define MAX_LINK_DOWN_RETRY 3
+
struct xgene_ahci_context {
struct ahci_host_priv *hpriv;
struct device *dev;
@@ -145,6 +148,14 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
return rc;
}
+static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
+{
+ void __iomem *diagcsr = ctx->csr_diag;
+
+ return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
+ readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
+}
+
/**
* xgene_ahci_read_id - Read ID data from the specified device
* @dev: device
@@ -229,8 +240,11 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
* and Gen1 (1.5Gbps). Otherwise during long IO stress test, the PHY will
* report disparity errors, etc. In addition, during COMRESET, errors can
* be reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
- * SERR_10B_8B_ERR, the PHY receiver line must be reseted. The following
- * algorithm is followed to proper configure the hardware PHY during COMRESET:
+ * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
+ * reboot cycle regression, the PHY sometimes reports link down even though
+ * the device is present, because of speed negotiation failure, so we need
+ * to retry the COMRESET to get the link up. The following algorithm is
+ * followed to properly configure the hardware PHY during COMRESET:
*
* Alg Part 1:
* 1. Start the PHY at Gen3 speed (default setting)
@@ -246,9 +260,15 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
* Alg Part 2:
* 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR errors
* reported in the register PORT_SCR_ERR, then reset the PHY receiver line
- * 2. Go to Alg Part 3
+ * 2. Go to Alg Part 4
*
* Alg Part 3:
+ * 1. Check PORT_SCR_STAT to see whether device presence was detected but PHY
+ * communication establishment failed; if the number of link-down attempts
+ * is less than the maximum of 3, go to Alg Part 1.
+ * 2. Go to Alg Part 4.
+ *
+ * Alg Part 4:
* 1. Clear any pending errors from register PORT_SCR_ERR.
*
* NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition
@@ -267,19 +287,27 @@ static int xgene_ahci_do_hardreset(struct ata_link *link,
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_taskfile tf;
+ int link_down_retry = 0;
int rc;
- u32 val;
-
- /* clear D2H reception area to properly wait for D2H FIS */
- ata_tf_init(link->device, &tf);
- tf.command = ATA_BUSY;
- ata_tf_to_fis(&tf, 0, 0, d2h_fis);
- rc = sata_link_hardreset(link, timing, deadline, online,
+ u32 val, sstatus;
+
+ do {
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = ATA_BUSY;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+ rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
+ if (*online) {
+ val = readl(port_mmio + PORT_SCR_ERR);
+ if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
+ dev_warn(ctx->dev, "link has error\n");
+ break;
+ }
- val = readl(port_mmio + PORT_SCR_ERR);
- if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
- dev_warn(ctx->dev, "link has error\n");
+ sata_scr_read(link, SCR_STATUS, &sstatus);
+ } while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
+ (sstatus & 0xff) == 0x1);
/* clear all errors if any pending */
val = readl(port_mmio + PORT_SCR_ERR);
@@ -467,6 +495,11 @@ static int xgene_ahci_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (xgene_ahci_is_memram_inited(ctx)) {
+ dev_info(dev, "skip clock and PHY initialization\n");
+ goto skip_clk_phy;
+ }
+
/* Due to errata, HW requires full toggle transition */
rc = ahci_platform_enable_clks(hpriv);
if (rc)
@@ -479,7 +512,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
/* Configure the host controller */
xgene_ahci_hw_init(hpriv);
-
+skip_clk_phy:
hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 893e30e9a9ef..ffbe625e6fd2 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
/* SATA Controller IDE (Coleto Creek) */
{ 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
{ } /* terminate list */
};
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 4d1a5d2c4287..47e418b8c8ba 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -143,6 +143,18 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
};
const struct ata_port_info *ppi[] = { &info, NULL };
+ /*
+ * The JMicron chip 361/363 contains one SATA controller and one
+ * PATA controller. To power on both controllers, we must follow
+ * the sequence one by one; otherwise one of them cannot be
+ * powered on successfully, so here we disable the async suspend
+ * method for these chips.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
+ (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
+ pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
+ device_disable_async_suspend(&pdev->dev);
+
return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
}
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 294a7dd25190..f032ed6dd459 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -282,6 +282,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xA8DB */
{ 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index db1e9560d8a7..5c8e7fe07745 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3918,7 +3918,6 @@ skip_create_disk:
if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
- rv = -ENOMEM;
goto block_queue_alloc_init_error;
}
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a3b042c4d448..00d469c7f9f7 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -462,17 +462,21 @@ static int null_add_dev(void)
struct gendisk *disk;
struct nullb *nullb;
sector_t size;
+ int rv;
nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
- if (!nullb)
+ if (!nullb) {
+ rv = -ENOMEM;
goto out;
+ }
spin_lock_init(&nullb->lock);
if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
submit_queues = nr_online_nodes;
- if (setup_queues(nullb))
+ rv = setup_queues(nullb);
+ if (rv)
goto out_free_nullb;
if (queue_mode == NULL_Q_MQ) {
@@ -484,22 +488,29 @@ static int null_add_dev(void)
nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
nullb->tag_set.driver_data = nullb;
- if (blk_mq_alloc_tag_set(&nullb->tag_set))
+ rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+ if (rv)
goto out_cleanup_queues;
nullb->q = blk_mq_init_queue(&nullb->tag_set);
- if (!nullb->q)
+ if (!nullb->q) {
+ rv = -ENOMEM;
goto out_cleanup_tags;
+ }
} else if (queue_mode == NULL_Q_BIO) {
nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
- if (!nullb->q)
+ if (!nullb->q) {
+ rv = -ENOMEM;
goto out_cleanup_queues;
+ }
blk_queue_make_request(nullb->q, null_queue_bio);
init_driver_queues(nullb);
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
- if (!nullb->q)
+ if (!nullb->q) {
+ rv = -ENOMEM;
goto out_cleanup_queues;
+ }
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
init_driver_queues(nullb);
@@ -509,8 +520,10 @@ static int null_add_dev(void)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
disk = nullb->disk = alloc_disk_node(1, home_node);
- if (!disk)
+ if (!disk) {
+ rv = -ENOMEM;
goto out_cleanup_blk_queue;
+ }
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
@@ -544,7 +557,7 @@ out_cleanup_queues:
out_free_nullb:
kfree(nullb);
out:
- return -ENOMEM;
+ return rv;
}
static int __init null_init(void)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 623c84145b79..4b97baf8afa3 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5087,9 +5087,11 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
- rbd_dev->rq_wq = alloc_workqueue(rbd_dev->disk->disk_name, 0, 0);
- if (!rbd_dev->rq_wq)
+ rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name);
+ if (!rbd_dev->rq_wq) {
+ ret = -ENOMEM;
goto err_out_mapping;
+ }
ret = rbd_bus_add_dev(rbd_dev);
if (ret)
diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c
index f7a32d2326c6..773bcde893c0 100644
--- a/drivers/cpufreq/cpufreq_opp.c
+++ b/drivers/cpufreq/cpufreq_opp.c
@@ -60,7 +60,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
goto out;
}
- freq_table = kcalloc(sizeof(*freq_table), (max_opps + 1), GFP_ATOMIC);
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
if (!freq_table) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 6a9d89c93b1f..ae2ab14e64b3 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -362,8 +362,9 @@ static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
vchan_cyclic_callback(&chan->desc->vdesc);
} else {
if (chan->next_sg == chan->desc->num_sgs) {
- chan->desc = NULL;
+ list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
}
}
}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index a2cc6be97983..b792194e0d9c 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -67,6 +67,7 @@ static int ast_detect_chip(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
uint32_t data, jreg;
+ ast_open_key(ast);
if (dev->pdev->device == PCI_CHIP_AST1180) {
ast->chip = AST1100;
@@ -104,7 +105,7 @@ static int ast_detect_chip(struct drm_device *dev)
}
ast->vga2_clone = false;
} else {
- ast->chip = 2000;
+ ast->chip = AST2000;
DRM_INFO("AST 2000 detected\n");
}
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2e7f03ad5ee2..9933c26017ed 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_power_domains_init_hw(dev_priv);
+ /*
+ * We enable some interrupt sources in our postinstall hooks, so mark
+ * interrupts as enabled _before_ actually enabling them to avoid
+ * special cases in our ordering checks.
+ */
+ dev_priv->pm._irqs_disabled = false;
+
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_gem_stolen;
- dev_priv->pm._irqs_disabled = false;
-
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a830eac5ba3..3524306d8cfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,7 @@ enum hpd_pin {
if ((1 << (domain)) & (mask))
struct drm_i915_private;
+struct i915_mm_struct;
struct i915_mmu_object;
enum intel_dpll_id {
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
struct i915_gtt gtt; /* VM representing the global address space */
struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
- DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+ DECLARE_HASHTABLE(mm_structs, 7);
+ struct mutex mm_lock;
/* Kernel Modesetting */
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15
- struct mm_struct *mm;
- struct i915_mmu_object *mn;
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
struct work_struct *work;
} userptr;
};
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ba7f5c6bb50d..ad55b06a3cb1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1590,10 +1590,13 @@ unlock:
out:
switch (ret) {
case -EIO:
- /* If this -EIO is due to a gpu hang, give the reset code a
- * chance to clean up the mess. Otherwise return the proper
- * SIGBUS. */
- if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+ /*
+ * We eat errors when the gpu is terminally wedged to avoid
+ * userspace unduly crashing (gl has no provisions for mmaps to
+ * fail). But any other -EIO isn't ours (e.g. swap in failure)
+ * and so needs to be reported.
+ */
+ if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
ret = VM_FAULT_SIGBUS;
break;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index fe69fc837d9e..d38413997379 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -32,6 +32,15 @@
#include <linux/mempolicy.h>
#include <linux/swap.h>
+struct i915_mm_struct {
+ struct mm_struct *mm;
+ struct drm_device *dev;
+ struct i915_mmu_notifier *mn;
+ struct hlist_node node;
+ struct kref kref;
+ struct work_struct work;
+};
+
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
struct mmu_notifier mn;
struct rb_root objects;
struct list_head linear;
- struct drm_device *dev;
- struct mm_struct *mm;
- struct work_struct work;
- unsigned long count;
unsigned long serial;
bool has_linear;
};
struct i915_mmu_object {
- struct i915_mmu_notifier *mmu;
+ struct i915_mmu_notifier *mn;
struct interval_tree_node it;
struct list_head link;
struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct i915_mmu_object *mmu;
+ struct i915_mmu_object *mo;
unsigned long serial;
restart:
serial = mn->serial;
- list_for_each_entry(mmu, &mn->linear, link) {
+ list_for_each_entry(mo, &mn->linear, link) {
struct drm_i915_gem_object *obj;
- if (mmu->it.last < start || mmu->it.start > end)
+ if (mo->it.last < start || mo->it.start > end)
continue;
- obj = mmu->obj;
+ obj = mo->obj;
drm_gem_object_reference(&obj->base);
spin_unlock(&mn->lock);
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
};
static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_mmu_notifier *mmu;
-
- /* Protected by dev->struct_mutex */
- hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
- if (mmu->mm == mm)
- return mmu;
-
- return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_mmu_notifier *mmu;
+ struct i915_mmu_notifier *mn;
int ret;
- lockdep_assert_held(&dev->struct_mutex);
-
- mmu = __i915_mmu_notifier_lookup(dev, mm);
- if (mmu)
- return mmu;
-
- mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
- if (mmu == NULL)
+ mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+ if (mn == NULL)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&mmu->lock);
- mmu->dev = dev;
- mmu->mn.ops = &i915_gem_userptr_notifier;
- mmu->mm = mm;
- mmu->objects = RB_ROOT;
- mmu->count = 0;
- mmu->serial = 1;
- INIT_LIST_HEAD(&mmu->linear);
- mmu->has_linear = false;
-
- /* Protected by mmap_sem (write-lock) */
- ret = __mmu_notifier_register(&mmu->mn, mm);
+ spin_lock_init(&mn->lock);
+ mn->mn.ops = &i915_gem_userptr_notifier;
+ mn->objects = RB_ROOT;
+ mn->serial = 1;
+ INIT_LIST_HEAD(&mn->linear);
+ mn->has_linear = false;
+
+ /* Protected by mmap_sem (write-lock) */
+ ret = __mmu_notifier_register(&mn->mn, mm);
if (ret) {
- kfree(mmu);
+ kfree(mn);
return ERR_PTR(ret);
}
- /* Protected by dev->struct_mutex */
- hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
- return mmu;
+ return mn;
}
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
{
- struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
- mmu_notifier_unregister(&mmu->mn, mmu->mm);
- kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
- lockdep_assert_held(&mmu->dev->struct_mutex);
-
- /* Protected by dev->struct_mutex */
- hash_del(&mmu->node);
-
- /* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex.
- * We enter the function holding struct_mutex, therefore we need
- * to drop our mutex prior to calling mmu_notifier_unregister in
- * order to prevent lock inversion (and system-wide deadlock)
- * between the mmap_sem and struct-mutex. Hence we defer the
- * unregistration to a workqueue where we hold no locks.
- */
- INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
- schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
- if (++mmu->serial == 0)
- mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
- struct i915_mmu_object *mn;
-
- list_for_each_entry(mn, &mmu->linear, link)
- if (mn->is_linear)
- return true;
-
- return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
- struct i915_mmu_object *mn)
-{
- lockdep_assert_held(&mmu->dev->struct_mutex);
-
- spin_lock(&mmu->lock);
- list_del(&mn->link);
- if (mn->is_linear)
- mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
- else
- interval_tree_remove(&mn->it, &mmu->objects);
- __i915_mmu_notifier_update_serial(mmu);
- spin_unlock(&mmu->lock);
-
- /* Protected against _add() by dev->struct_mutex */
- if (--mmu->count == 0)
- __i915_mmu_notifier_destroy(mmu);
+ if (++mn->serial == 0)
+ mn->serial = 1;
}
static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
- struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+ struct i915_mmu_notifier *mn,
+ struct i915_mmu_object *mo)
{
struct interval_tree_node *it;
int ret;
- ret = i915_mutex_lock_interruptible(mmu->dev);
+ ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
* remove the objects from the interval tree) before we do
* the check for overlapping objects.
*/
- i915_gem_retire_requests(mmu->dev);
+ i915_gem_retire_requests(dev);
- spin_lock(&mmu->lock);
- it = interval_tree_iter_first(&mmu->objects,
- mn->it.start, mn->it.last);
+ spin_lock(&mn->lock);
+ it = interval_tree_iter_first(&mn->objects,
+ mo->it.start, mo->it.last);
if (it) {
struct drm_i915_gem_object *obj;
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
obj = container_of(it, struct i915_mmu_object, it)->obj;
if (!obj->userptr.workers)
- mmu->has_linear = mn->is_linear = true;
+ mn->has_linear = mo->is_linear = true;
else
ret = -EAGAIN;
} else
- interval_tree_insert(&mn->it, &mmu->objects);
+ interval_tree_insert(&mo->it, &mn->objects);
if (ret == 0) {
- list_add(&mn->link, &mmu->linear);
- __i915_mmu_notifier_update_serial(mmu);
+ list_add(&mo->link, &mn->linear);
+ __i915_mmu_notifier_update_serial(mn);
}
- spin_unlock(&mmu->lock);
- mutex_unlock(&mmu->dev->struct_mutex);
+ spin_unlock(&mn->lock);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+ struct i915_mmu_object *mo;
+
+ list_for_each_entry(mo, &mn->linear, link)
+ if (mo->is_linear)
+ return true;
+
+ return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+ struct i915_mmu_object *mo)
+{
+ spin_lock(&mn->lock);
+ list_del(&mo->link);
+ if (mo->is_linear)
+ mn->has_linear = i915_mmu_notifier_has_linear(mn);
+ else
+ interval_tree_remove(&mo->it, &mn->objects);
+ __i915_mmu_notifier_update_serial(mn);
+ spin_unlock(&mn->lock);
+}
+
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
- struct i915_mmu_object *mn;
+ struct i915_mmu_object *mo;
- mn = obj->userptr.mn;
- if (mn == NULL)
+ mo = obj->userptr.mmu_object;
+ if (mo == NULL)
return;
- i915_mmu_notifier_del(mn->mmu, mn);
- obj->userptr.mn = NULL;
+ i915_mmu_notifier_del(mo->mn, mo);
+ kfree(mo);
+
+ obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+ if (mm->mn == NULL) {
+ down_write(&mm->mm->mmap_sem);
+ mutex_lock(&to_i915(mm->dev)->mm_lock);
+ if (mm->mn == NULL)
+ mm->mn = i915_mmu_notifier_create(mm->mm);
+ mutex_unlock(&to_i915(mm->dev)->mm_lock);
+ up_write(&mm->mm->mmap_sem);
+ }
+ return mm->mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
unsigned flags)
{
- struct i915_mmu_notifier *mmu;
- struct i915_mmu_object *mn;
+ struct i915_mmu_notifier *mn;
+ struct i915_mmu_object *mo;
int ret;
if (flags & I915_USERPTR_UNSYNCHRONIZED)
return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
- down_write(&obj->userptr.mm->mmap_sem);
- ret = i915_mutex_lock_interruptible(obj->base.dev);
- if (ret == 0) {
- mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
- if (!IS_ERR(mmu))
- mmu->count++; /* preemptive add to act as a refcount */
- else
- ret = PTR_ERR(mmu);
- mutex_unlock(&obj->base.dev->struct_mutex);
- }
- up_write(&obj->userptr.mm->mmap_sem);
- if (ret)
- return ret;
+ if (WARN_ON(obj->userptr.mm == NULL))
+ return -EINVAL;
- mn = kzalloc(sizeof(*mn), GFP_KERNEL);
- if (mn == NULL) {
- ret = -ENOMEM;
- goto destroy_mmu;
- }
+ mn = i915_mmu_notifier_find(obj->userptr.mm);
+ if (IS_ERR(mn))
+ return PTR_ERR(mn);
- mn->mmu = mmu;
- mn->it.start = obj->userptr.ptr;
- mn->it.last = mn->it.start + obj->base.size - 1;
- mn->obj = obj;
+ mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+ if (mo == NULL)
+ return -ENOMEM;
- ret = i915_mmu_notifier_add(mmu, mn);
- if (ret)
- goto free_mn;
+ mo->mn = mn;
+ mo->it.start = obj->userptr.ptr;
+ mo->it.last = mo->it.start + obj->base.size - 1;
+ mo->obj = obj;
- obj->userptr.mn = mn;
+ ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+ if (ret) {
+ kfree(mo);
+ return ret;
+ }
+
+ obj->userptr.mmu_object = mo;
return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ if (mn == NULL)
+ return;
-free_mn:
+ mmu_notifier_unregister(&mn->mn, mm);
kfree(mn);
-destroy_mmu:
- mutex_lock(&obj->base.dev->struct_mutex);
- if (--mmu->count == 0)
- __i915_mmu_notifier_destroy(mmu);
- mutex_unlock(&obj->base.dev->struct_mutex);
- return ret;
}
#else
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
return 0;
}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+}
+
#endif
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+ struct i915_mm_struct *mm;
+
+ /* Protected by dev_priv->mm_lock */
+ hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+ if (mm->mm == real)
+ return mm;
+
+ return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_mm_struct *mm;
+ int ret = 0;
+
+ /* During release of the GEM object we hold the struct_mutex. This
+ * precludes us from calling mmput() at that time as that may be
+ * the last reference and so call exit_mmap(). exit_mmap() will
+ * attempt to reap the vma, and if we were holding a GTT mmap
+ * would then call drm_gem_vm_close() and attempt to reacquire
+ * the struct mutex. So in order to avoid that recursion, we have
+ * to defer releasing the mm reference until after we drop the
+ * struct_mutex, i.e. we need to schedule a worker to do the clean
+ * up.
+ */
+ mutex_lock(&dev_priv->mm_lock);
+ mm = __i915_mm_struct_find(dev_priv, current->mm);
+ if (mm == NULL) {
+ mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+ if (mm == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ kref_init(&mm->kref);
+ mm->dev = obj->base.dev;
+
+ mm->mm = current->mm;
+ atomic_inc(&current->mm->mm_count);
+
+ mm->mn = NULL;
+
+ /* Protected by dev_priv->mm_lock */
+ hash_add(dev_priv->mm_structs,
+ &mm->node, (unsigned long)mm->mm);
+ } else
+ kref_get(&mm->kref);
+
+ obj->userptr.mm = mm;
+out:
+ mutex_unlock(&dev_priv->mm_lock);
+ return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+ struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+ i915_mmu_notifier_free(mm->mn, mm->mm);
+ mmdrop(mm->mm);
+ kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+ struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+ /* Protected by dev_priv->mm_lock */
+ hash_del(&mm->node);
+ mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+ INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+ schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+ if (obj->userptr.mm == NULL)
+ return;
+
+ kref_put_mutex(&obj->userptr.mm->kref,
+ __i915_mm_struct_free,
+ &to_i915(obj->base.dev)->mm_lock);
+ obj->userptr.mm = NULL;
+}
+
struct get_pages_work {
struct work_struct work;
struct drm_i915_gem_object *obj;
struct task_struct *task;
};
-
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec == NULL)
pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
if (pvec != NULL) {
- struct mm_struct *mm = obj->userptr.mm;
+ struct mm_struct *mm = obj->userptr.mm->mm;
down_read(&mm->mmap_sem);
while (pinned < num_pages) {
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pvec = NULL;
pinned = 0;
- if (obj->userptr.mm == current->mm) {
+ if (obj->userptr.mm->mm == current->mm) {
pvec = kmalloc(num_pages*sizeof(struct page *),
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (pvec == NULL) {
@@ -651,17 +708,13 @@ static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
i915_gem_userptr_release__mmu_notifier(obj);
-
- if (obj->userptr.mm) {
- mmput(obj->userptr.mm);
- obj->userptr.mm = NULL;
- }
+ i915_gem_userptr_release__mm_struct(obj);
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
- if (obj->userptr.mn)
+ if (obj->userptr.mmu_object)
return 0;
return i915_gem_userptr_init__mmu_notifier(obj, 0);
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return -ENODEV;
}
- /* Allocate the new object */
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return -ENOMEM;
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
* at binding. This means that we need to hook into the mmu_notifier
* in order to detect if the mmu is destroyed.
*/
- ret = -ENOMEM;
- if ((obj->userptr.mm = get_task_mm(current)))
+ ret = i915_gem_userptr_init__mm_struct(obj);
+ if (ret == 0)
ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
if (ret == 0)
ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
int
i915_gem_init_userptr(struct drm_device *dev)
{
-#if defined(CONFIG_MMU_NOTIFIER)
struct drm_i915_private *dev_priv = to_i915(dev);
- hash_init(dev_priv->mmu_notifiers);
-#endif
+ mutex_init(&dev_priv->mm_lock);
+ hash_init(dev_priv->mm_structs);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e4d7607da2c4..f29b44c86a2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,16 +334,20 @@
#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+#define BLT_WRITE_A (2<<20)
+#define BLT_WRITE_RGB (1<<20)
+#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A)
#define BLT_DEPTH_8 (0<<24)
#define BLT_DEPTH_16_565 (1<<24)
#define BLT_DEPTH_16_1555 (2<<24)
#define BLT_DEPTH_32 (3<<24)
-#define BLT_ROP_GXCOPY (0xcc<<16)
+#define BLT_ROP_SRC_COPY (0xcc<<16)
+#define BLT_ROP_COLOR_COPY (0xf0<<16)
#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
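The reworked blitter defines are plain bit-packed command dwords; the standalone snippet below (values copied from the hunk, nothing i915-specific) prints the words the i830 dispatch path further down will emit. Reading the trailing constant as "packet length in dwords minus two" follows the usual command-streamer convention and is an assumption here, not something stated by the patch.

#include <stdio.h>

#define COLOR_BLT_CMD		(2U << 29 | 0x40U << 22 | (5 - 2))
#define SRC_COPY_BLT_CMD	((2U << 29) | (0x43U << 22) | 4)
#define BLT_WRITE_RGB		(1U << 20)
#define BLT_WRITE_A		(2U << 20)
#define BLT_WRITE_RGBA		(BLT_WRITE_RGB | BLT_WRITE_A)
#define BLT_DEPTH_32		(3U << 24)
#define BLT_ROP_COLOR_COPY	(0xf0U << 16)

int main(void)
{
	/* first dword of the TLB-evicting color blit in i830_dispatch_execbuffer() */
	printf("COLOR_BLT_CMD | BLT_WRITE_RGBA           = 0x%08x\n",
	       COLOR_BLT_CMD | BLT_WRITE_RGBA);
	/* second dword: depth, raster op, destination pitch of 4096 bytes */
	printf("BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096 = 0x%08x\n",
	       BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	printf("SRC_COPY_BLT_CMD | BLT_WRITE_RGBA        = 0x%08x\n",
	       SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
	return 0;
}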
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 16371a444426..2d068edd1adc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
+ u32 cs_offset = ring->scratch.gtt_offset;
int ret;
- if (flags & I915_DISPATCH_PINNED) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- } else {
- u32 cs_offset = ring->scratch.gtt_offset;
+ /* Evict the invalid PTE TLBs */
+ intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+ intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0xdeadbeef);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ if ((flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- ret = intel_ring_begin(ring, 9+3);
+ ret = intel_ring_begin(ring, 6 + 2);
if (ret)
return ret;
- /* Blit the batch (which has now all relocs applied) to the stable batch
- * scratch bo area (so that the CS never stumbles over its tlb
- * invalidation bug) ... */
- intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
- XY_SRC_COPY_BLT_WRITE_ALPHA |
- XY_SRC_COPY_BLT_WRITE_RGB);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+
+ /* Blit the batch (which now has all relocs applied) to the
+ * stable batch scratch bo area (so that the CS never
+ * stumbles over its tlb invalidation bug) ...
+ */
+ intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+ intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 0);
intel_ring_emit(ring, 4096);
intel_ring_emit(ring, offset);
+
intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
/* ... and execute it. */
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
- intel_ring_emit(ring, cs_offset + len - 8);
- intel_ring_advance(ring);
+ offset = cs_offset;
}
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
return 0;
}
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
- obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
if (obj == NULL) {
DRM_ERROR("Failed to allocate batch bo\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c69d3ce1b3d6..c14341ca3ef9 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* Prevents vblank waits from timing out in intel_tv_detect_type() */
+ intel_wait_for_vblank(encoder->base.dev,
+ to_intel_crtc(encoder->base.crtc)->pipe);
+
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index a125a7e32742..c6c9b02e0ada 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -258,28 +258,30 @@ static void set_hdmi_pdev(struct drm_device *dev,
priv->hdmi_pdev = pdev;
}
+#ifdef CONFIG_OF
+static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
+{
+ int gpio = of_get_named_gpio(of_node, name, 0);
+ if (gpio < 0) {
+ char name2[32];
+ snprintf(name2, sizeof(name2), "%s-gpio", name);
+ gpio = of_get_named_gpio(of_node, name2, 0);
+ if (gpio < 0) {
+ dev_err(dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
+ gpio = -1;
+ }
+ }
+ return gpio;
+}
+#endif
+
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
static struct hdmi_platform_config config = {};
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
- int get_gpio(const char *name)
- {
- int gpio = of_get_named_gpio(of_node, name, 0);
- if (gpio < 0) {
- char name2[32];
- snprintf(name2, sizeof(name2), "%s-gpio", name);
- gpio = of_get_named_gpio(of_node, name2, 0);
- if (gpio < 0) {
- dev_err(dev, "failed to get gpio: %s (%d)\n",
- name, gpio);
- gpio = -1;
- }
- }
- return gpio;
- }
-
if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
@@ -312,12 +314,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
}
config.mmio_name = "core_physical";
- config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
- config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
- config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
- config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
- config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
- config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm");
+ config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
+ config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
+ config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
+ config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
+ config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
+ config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
#else
static const char *hpd_clk_names[] = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 902d7685d441..f408b69486a8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -15,19 +15,25 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#ifdef CONFIG_COMMON_CLK
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#endif
#include "hdmi.h"
struct hdmi_phy_8960 {
struct hdmi_phy base;
struct hdmi *hdmi;
+#ifdef CONFIG_COMMON_CLK
struct clk_hw pll_hw;
struct clk *pll;
unsigned long pixclk;
+#endif
};
#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+#ifdef CONFIG_COMMON_CLK
#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
/*
@@ -374,7 +380,7 @@ static struct clk_init_data pll_init = {
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
};
-
+#endif
/*
* HDMI Phy:
@@ -480,12 +486,15 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
{
struct hdmi_phy_8960 *phy_8960;
struct hdmi_phy *phy = NULL;
- int ret, i;
+ int ret;
+#ifdef CONFIG_COMMON_CLK
+ int i;
/* sanity check: */
for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
return ERR_PTR(-EINVAL);
+#endif
phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
if (!phy_8960) {
@@ -499,6 +508,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
phy_8960->hdmi = hdmi;
+#ifdef CONFIG_COMMON_CLK
phy_8960->pll_hw.init = &pll_init;
phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
if (IS_ERR(phy_8960->pll)) {
@@ -506,6 +516,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
phy_8960->pll = NULL;
goto fail;
}
+#endif
return phy;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 26ee80db17af..fcf95680413d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -52,7 +52,7 @@ module_param(reglog, bool, 0600);
#define reglog 0
#endif
-static char *vram;
+static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index b1e11f8434e2..ac14b67621d3 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -405,16 +405,13 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
u8 msg[DP_DPCD_SIZE];
int ret;
- char dpcd_hex_dump[DP_DPCD_SIZE * 3];
-
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
- hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
- 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
- DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+ DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+ dig_connector->dpcd);
radeon_dp_probe_oui(radeon_connector);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e616eb5f6e7a..3cfb50056f7a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2769,8 +2769,8 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
- /* PFP_SYNC_ME packet only exists on 7xx+ */
- if (emit_wait && (rdev->family >= CHIP_RV770)) {
+ /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+ if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
/* Prevent the PFP from running ahead of the semaphore wait */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 92b2d8dd4735..e74c7e387dde 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* Fujitsu D3003-S2 board lists its DVI-I connector as both DVI-I and VGA */
+ if ((dev->pdev->device == 0x9805) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+ return false;
+ }
return true;
}
@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
- } else if ((controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
- DRM_INFO("Special thermal controller config\n");
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+ DRM_INFO("External GPIO thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+ DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+ DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 56d9fd66d8ae..abd6753a570a 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,7 +34,7 @@
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- uint32_t *cpu_addr;
+ uint64_t *cpu_addr;
int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e1e558a3d692..af8256353c7d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1089,6 +1089,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
return err;
}
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+ u64 *reg_id)
+{
+ void *ib_flow;
+ union ib_flow_spec *ib_spec;
+ struct mlx4_dev *dev = to_mdev(qp->device)->dev;
+ int err = 0;
+
+ if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ return 0; /* do nothing */
+
+ ib_flow = flow_attr + 1;
+ ib_spec = (union ib_flow_spec *)ib_flow;
+
+ if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+ return 0; /* do nothing */
+
+ err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+ flow_attr->port, qp->qp_num,
+ MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+ reg_id);
+ return err;
+}
+
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
@@ -1136,6 +1160,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
i++;
}
+ if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+ err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+ if (err)
+ goto err_free;
+ }
+
return &mflow->ibflow;
err_free:
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 67780452f0cf..efb9eff8906c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
MLX4_IB_LINK_TYPE_ETH;
+ if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ /* set QP to receive both tunneled & non-tunneled packets */
+ if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+ context->srqn = cpu_to_be32(7 << 28);
+ }
+ }
if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
int is_eth = rdma_port_get_link_layer(
diff --git a/drivers/input/keyboard/cap1106.c b/drivers/input/keyboard/cap1106.c
index 180b184ab90f..d70b65a14ced 100644
--- a/drivers/input/keyboard/cap1106.c
+++ b/drivers/input/keyboard/cap1106.c
@@ -33,8 +33,8 @@
#define CAP1106_REG_SENSOR_CONFIG 0x22
#define CAP1106_REG_SENSOR_CONFIG2 0x23
#define CAP1106_REG_SAMPLING_CONFIG 0x24
-#define CAP1106_REG_CALIBRATION 0x25
-#define CAP1106_REG_INT_ENABLE 0x26
+#define CAP1106_REG_CALIBRATION 0x26
+#define CAP1106_REG_INT_ENABLE 0x27
#define CAP1106_REG_REPEAT_RATE 0x28
#define CAP1106_REG_MT_CONFIG 0x2a
#define CAP1106_REG_MT_PATTERN_CONFIG 0x2b
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 8d2e19e81e1e..e651fa692afe 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -332,23 +332,24 @@ static int matrix_keypad_init_gpio(struct platform_device *pdev,
}
if (pdata->clustered_irq > 0) {
- err = request_irq(pdata->clustered_irq,
+ err = request_any_context_irq(pdata->clustered_irq,
matrix_keypad_interrupt,
pdata->clustered_irq_flags,
"matrix-keypad", keypad);
- if (err) {
+ if (err < 0) {
dev_err(&pdev->dev,
"Unable to acquire clustered interrupt\n");
goto err_free_rows;
}
} else {
for (i = 0; i < pdata->num_row_gpios; i++) {
- err = request_irq(gpio_to_irq(pdata->row_gpios[i]),
+ err = request_any_context_irq(
+ gpio_to_irq(pdata->row_gpios[i]),
matrix_keypad_interrupt,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING,
"matrix-keypad", keypad);
- if (err) {
+ if (err < 0) {
dev_err(&pdev->dev,
"Unable to acquire interrupt for GPIO line %i\n",
pdata->row_gpios[i]);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index a956b980ee73..35a49bf57227 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -2373,6 +2373,10 @@ int alps_init(struct psmouse *psmouse)
dev2->keybit[BIT_WORD(BTN_LEFT)] =
BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
+ __set_bit(INPUT_PROP_POINTER, dev2->propbit);
+ if (priv->flags & ALPS_DUALPOINT)
+ __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
+
if (input_register_device(priv->dev2))
goto init_fail;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index da51738eb59e..06fc6e76ffbe 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1331,6 +1331,13 @@ static bool elantech_is_signature_valid(const unsigned char *param)
if (param[1] == 0)
return true;
+ /*
+ * Some models have a revision higher than 20, meaning param[2] may
+ * be 10 or 20; skip the rates check for these.
+ */
+ if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
+ return true;
+
for (i = 0; i < ARRAY_SIZE(rates); i++)
if (param[2] == rates[i])
return false;
@@ -1607,6 +1614,10 @@ int elantech_init(struct psmouse *psmouse)
tp_dev->keybit[BIT_WORD(BTN_LEFT)] =
BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) |
BIT_MASK(BTN_RIGHT);
+
+ __set_bit(INPUT_PROP_POINTER, tp_dev->propbit);
+ __set_bit(INPUT_PROP_POINTING_STICK, tp_dev->propbit);
+
error = input_register_device(etd->tp_dev);
if (error < 0)
goto init_fail_tp_reg;
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index cff065f6261c..b4e1f014ddc2 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -670,6 +670,8 @@ static void psmouse_apply_defaults(struct psmouse *psmouse)
__set_bit(REL_X, input_dev->relbit);
__set_bit(REL_Y, input_dev->relbit);
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
psmouse->set_rate = psmouse_set_rate;
psmouse->set_resolution = psmouse_set_resolution;
psmouse->poll = psmouse_poll;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index e8573c68f77e..fd23181c1fb7 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -629,10 +629,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
((buf[0] & 0x04) >> 1) |
((buf[3] & 0x04) >> 2));
+ if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+ SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
+ hw->w == 2) {
+ synaptics_parse_agm(buf, priv, hw);
+ return 1;
+ }
+
+ hw->x = (((buf[3] & 0x10) << 8) |
+ ((buf[1] & 0x0f) << 8) |
+ buf[4]);
+ hw->y = (((buf[3] & 0x20) << 7) |
+ ((buf[1] & 0xf0) << 4) |
+ buf[5]);
+ hw->z = buf[2];
+
hw->left = (buf[0] & 0x01) ? 1 : 0;
hw->right = (buf[0] & 0x02) ? 1 : 0;
- if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
+ if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
+ /*
+ * ForcePads, like Clickpads, use middle button
+ * bits to report primary button clicks.
+ * Unfortunately they report the primary button not
+ * only when the user presses on the pad above a certain
+ * threshold, but also when there is more than one
+ * finger on the touchpad, which interferes with
+ * our multi-finger gestures.
+ */
+ if (hw->z == 0) {
+ /* No contacts */
+ priv->press = priv->report_press = false;
+ } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) {
+ /*
+ * Single-finger touch with pressure above
+ * the threshold. If pressure stays long
+ * enough, we'll start reporting the primary
+ * button. We rely on the device continuing to
+ * send data even if the finger does not
+ * move.
+ */
+ if (!priv->press) {
+ priv->press_start = jiffies;
+ priv->press = true;
+ } else if (time_after(jiffies,
+ priv->press_start +
+ msecs_to_jiffies(50))) {
+ priv->report_press = true;
+ }
+ } else {
+ priv->press = false;
+ }
+
+ hw->left = priv->report_press;
+
+ } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
/*
* Clickpad's button is transmitted as middle button,
* however, since it is primary button, we will report
@@ -651,21 +702,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
}
- if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
- hw->w == 2) {
- synaptics_parse_agm(buf, priv, hw);
- return 1;
- }
-
- hw->x = (((buf[3] & 0x10) << 8) |
- ((buf[1] & 0x0f) << 8) |
- buf[4]);
- hw->y = (((buf[3] & 0x20) << 7) |
- ((buf[1] & 0xf0) << 4) |
- buf[5]);
- hw->z = buf[2];
-
if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
((buf[0] ^ buf[3]) & 0x02)) {
switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index e594af0b264b..fb2e076738ae 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -78,6 +78,11 @@
* 2 0x08 image sensor image sensor tracks 5 fingers, but only
* reports 2.
* 2 0x20 report min query 0x0f gives min coord reported
+ * 2 0x80 forcepad forcepad is a variant of clickpad that
+ * does not have physical buttons but rather
+ * uses pressure above a certain threshold to
+ * report primary clicks. Forcepads also have
+ * the clickpad bit set.
*/
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
@@ -86,6 +91,7 @@
#define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
+#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000)
/* synaptics modes query bits */
#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
@@ -177,6 +183,11 @@ struct synaptics_data {
*/
struct synaptics_hw_state agm;
bool agm_pending; /* new AGM packet received */
+
+ /* ForcePad handling */
+ unsigned long press_start;
+ bool press;
+ bool report_press;
};
void synaptics_module_init(void);
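Taken together, the synaptics.c and synaptics.h hunks implement a small press debounce: a single-finger press has to stay above the pressure threshold for roughly 50 ms (tracked via press_start/press/report_press) before the primary click is reported, and the state is reset once the pad reports zero pressure. A compilable sketch of the same idea follows, with CLOCK_MONOTONIC standing in for jiffies/time_after; the 50 ms threshold matches the patch, everything else is illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct forcepad_state {
	struct timespec press_start;
	bool press;
	bool report_press;
};

static long ms_since(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000 +
	       (now.tv_nsec - start->tv_nsec) / 1000000;
}

/* z is the reported pressure; single_finger_press mirrors the w/buf checks */
static bool forcepad_left_button(struct forcepad_state *s,
				 int z, bool single_finger_press)
{
	if (z == 0) {
		s->press = s->report_press = false;	/* no contacts */
	} else if (single_finger_press) {
		if (!s->press) {
			clock_gettime(CLOCK_MONOTONIC, &s->press_start);
			s->press = true;
		} else if (ms_since(&s->press_start) >= 50) {
			s->report_press = true;		/* held long enough */
		}
	} else {
		s->press = false;
	}
	return s->report_press;
}

int main(void)
{
	struct forcepad_state s = { .press = false, .report_press = false };
	struct timespec hold = { .tv_sec = 0, .tv_nsec = 60 * 1000 * 1000 };

	printf("first packet:  left=%d\n", forcepad_left_button(&s, 30, true));
	nanosleep(&hold, NULL);		/* keep the press held for ~60 ms */
	printf("after ~60 ms:  left=%d\n", forcepad_left_button(&s, 30, true));
	printf("finger lifted: left=%d\n", forcepad_left_button(&s, 0, false));
	return 0;
}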
diff --git a/drivers/input/mouse/synaptics_usb.c b/drivers/input/mouse/synaptics_usb.c
index e122bda16aab..6bcc0189c1c9 100644
--- a/drivers/input/mouse/synaptics_usb.c
+++ b/drivers/input/mouse/synaptics_usb.c
@@ -387,6 +387,7 @@ static int synusb_probe(struct usb_interface *intf,
__set_bit(EV_REL, input_dev->evbit);
__set_bit(REL_X, input_dev->relbit);
__set_bit(REL_Y, input_dev->relbit);
+ __set_bit(INPUT_PROP_POINTING_STICK, input_dev->propbit);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0);
} else {
input_set_abs_params(input_dev, ABS_X,
@@ -401,6 +402,11 @@ static int synusb_probe(struct usb_interface *intf,
__set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
}
+ if (synusb->flags & SYNUSB_TOUCHSCREEN)
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ else
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
__set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_RIGHT, input_dev->keybit);
__set_bit(BTN_MIDDLE, input_dev->keybit);
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index ca843b6cf6bd..30c8b6998808 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -393,6 +393,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
if ((button_info & 0x0f) >= 3)
__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
+ __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
+ __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
+
trackpoint_defaults(psmouse->private);
error = trackpoint_power_on_reset(&psmouse->ps2dev);
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 0cb7ef59071b..69175b825346 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/serio.h>
#include <linux/tty.h>
+#include <linux/compat.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Input device TTY line discipline");
@@ -198,28 +199,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
return 0;
}
+static void serport_set_type(struct tty_struct *tty, unsigned long type)
+{
+ struct serport *serport = tty->disc_data;
+
+ serport->id.proto = type & 0x000000ff;
+ serport->id.id = (type & 0x0000ff00) >> 8;
+ serport->id.extra = (type & 0x00ff0000) >> 16;
+}
+
/*
* serport_ldisc_ioctl() allows setting the port protocol and device ID
*/
-static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg)
+static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
{
- struct serport *serport = (struct serport*) tty->disc_data;
- unsigned long type;
-
if (cmd == SPIOCSTYPE) {
+ unsigned long type;
+
if (get_user(type, (unsigned long __user *) arg))
return -EFAULT;
- serport->id.proto = type & 0x000000ff;
- serport->id.id = (type & 0x0000ff00) >> 8;
- serport->id.extra = (type & 0x00ff0000) >> 16;
+ serport_set_type(tty, type);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t)
+static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
+ struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ if (cmd == COMPAT_SPIOCSTYPE) {
+ void __user *uarg = compat_ptr(arg);
+ compat_ulong_t compat_type;
+
+ if (get_user(compat_type, (compat_ulong_t __user *)uarg))
+ return -EFAULT;
+ serport_set_type(tty, compat_type);
return 0;
}
return -EINVAL;
}
+#endif
static void serport_ldisc_write_wakeup(struct tty_struct * tty)
{
@@ -243,6 +271,9 @@ static struct tty_ldisc_ops serport_ldisc = {
.close = serport_ldisc_close,
.read = serport_ldisc_read,
.ioctl = serport_ldisc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = serport_ldisc_compat_ioctl,
+#endif
.receive_buf = serport_ldisc_receive,
.write_wakeup = serport_ldisc_write_wakeup
};
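The compat handler above exists so that a 32-bit helper on a 64-bit kernel can still hand its unsigned-long-sized SPIOCSTYPE payload to the line discipline. The userspace sketch below shows roughly what such a helper (in the spirit of inputattach) does; SPIOCSTYPE is redefined locally to mirror serport.c, the N_MOUSE line-discipline number and the serio protocol value are assumptions, and error handling is kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define SPIOCSTYPE	_IOW('q', 0x01, unsigned long)	/* mirrors serport.c */

int main(void)
{
	int ldisc = 2;				/* N_MOUSE, the serport line discipline */
	unsigned long type = 0x01		/* serio proto byte (illustrative value) */
			   | (0x00 << 8)	/* id    */
			   | (0x00 << 16);	/* extra */
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {	/* attach the line discipline */
		perror("TIOCSETD");
		return 1;
	}
	if (ioctl(fd, SPIOCSTYPE, &type) < 0) {	/* hand over proto/id/extra */
		perror("SPIOCSTYPE");
		return 1;
	}
	pause();				/* keep the ldisc (and serio port) alive */
	return 0;
}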
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index db178ed2b47e..aaacf8bfa61f 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -837,7 +837,12 @@ static irqreturn_t mxt_process_messages_t44(struct mxt_data *data)
count = data->msg_buf[0];
if (count == 0) {
- dev_warn(dev, "Interrupt triggered but zero messages\n");
+ /*
+ * This condition is caused by the CHG line being configured
+ * in Mode 0. It results in unnecessary I2C operations but it
+ * is benign.
+ */
+ dev_dbg(dev, "Interrupt triggered but zero messages\n");
return IRQ_NONE;
} else if (count > data->max_reportid) {
dev_err(dev, "T44 count %d exceeded max report id\n", count);
@@ -1374,11 +1379,16 @@ static int mxt_get_info(struct mxt_data *data)
return 0;
}
-static void mxt_free_object_table(struct mxt_data *data)
+static void mxt_free_input_device(struct mxt_data *data)
{
- input_unregister_device(data->input_dev);
- data->input_dev = NULL;
+ if (data->input_dev) {
+ input_unregister_device(data->input_dev);
+ data->input_dev = NULL;
+ }
+}
+static void mxt_free_object_table(struct mxt_data *data)
+{
kfree(data->object_table);
data->object_table = NULL;
kfree(data->msg_buf);
@@ -1957,11 +1967,13 @@ static int mxt_load_fw(struct device *dev, const char *fn)
ret = mxt_lookup_bootloader_address(data, 0);
if (ret)
goto release_firmware;
+
+ mxt_free_input_device(data);
+ mxt_free_object_table(data);
} else {
enable_irq(data->irq);
}
- mxt_free_object_table(data);
reinit_completion(&data->bl_completion);
ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD, false);
@@ -2210,6 +2222,7 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
err_free_object:
+ mxt_free_input_device(data);
mxt_free_object_table(data);
err_free_irq:
free_irq(client->irq, data);
@@ -2224,7 +2237,7 @@ static int mxt_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
free_irq(data->irq, data);
- input_unregister_device(data->input_dev);
+ mxt_free_input_device(data);
mxt_free_object_table(data);
kfree(data);
diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c
index 16b52115c27f..705ffa1e064a 100644
--- a/drivers/input/touchscreen/wm9712.c
+++ b/drivers/input/touchscreen/wm9712.c
@@ -41,7 +41,7 @@
*/
static int rpu = 8;
module_param(rpu, int, 0);
-MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
/*
* Set current used for pressure measurement.
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c
index 7405353199d7..572a5a64face 100644
--- a/drivers/input/touchscreen/wm9713.c
+++ b/drivers/input/touchscreen/wm9713.c
@@ -41,7 +41,7 @@
*/
static int rpu = 8;
module_param(rpu, int, 0);
-MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect.");
+MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
/*
* Set current used for pressure measurement.
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index ca18d6d42a9b..a83cc2a2a2ca 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -146,6 +146,8 @@
#define ID0_CTTW (1 << 14)
#define ID0_NUMIRPT_SHIFT 16
#define ID0_NUMIRPT_MASK 0xff
+#define ID0_NUMSIDB_SHIFT 9
+#define ID0_NUMSIDB_MASK 0xf
#define ID0_NUMSMRG_SHIFT 0
#define ID0_NUMSMRG_MASK 0xff
@@ -524,9 +526,18 @@ static int register_smmu_master(struct arm_smmu_device *smmu,
master->of_node = masterspec->np;
master->cfg.num_streamids = masterspec->args_count;
- for (i = 0; i < master->cfg.num_streamids; ++i)
- master->cfg.streamids[i] = masterspec->args[i];
+ for (i = 0; i < master->cfg.num_streamids; ++i) {
+ u16 streamid = masterspec->args[i];
+ if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
+ (streamid >= smmu->num_mapping_groups)) {
+ dev_err(dev,
+ "stream ID for master device %s greater than maximum allowed (%d)\n",
+ masterspec->np->name, smmu->num_mapping_groups);
+ return -ERANGE;
+ }
+ master->cfg.streamids[i] = streamid;
+ }
return insert_smmu_master(smmu, master);
}
@@ -623,7 +634,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
if (fsr & FSR_IGN)
dev_err_ratelimited(smmu->dev,
- "Unexpected context fault (fsr 0x%u)\n",
+ "Unexpected context fault (fsr 0x%x)\n",
fsr);
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
@@ -752,6 +763,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
break;
case 39:
+ case 40:
reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
break;
case 42:
@@ -773,6 +785,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
break;
case 39:
+ case 40:
reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
break;
case 42:
@@ -843,8 +856,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
reg |= TTBCR_EAE |
(TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
(TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
- (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+ (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
+
+ if (!stage1)
+ reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
/* MAIR0 (stage-1 only) */
@@ -868,10 +884,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct arm_smmu_device *smmu)
{
- int irq, ret, start;
+ int irq, start, ret = 0;
+ unsigned long flags;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ spin_lock_irqsave(&smmu_domain->lock, flags);
+ if (smmu_domain->smmu)
+ goto out_unlock;
+
if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
/*
* We will likely want to change this if/when KVM gets
@@ -890,7 +911,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
smmu->num_context_banks);
if (IS_ERR_VALUE(ret))
- return ret;
+ goto out_unlock;
cfg->cbndx = ret;
if (smmu->version == 1) {
@@ -900,6 +921,10 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
cfg->irptndx = cfg->cbndx;
}
+ ACCESS_ONCE(smmu_domain->smmu) = smmu;
+ arm_smmu_init_context_bank(smmu_domain);
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
+
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
"arm-smmu-context-fault", domain);
@@ -907,15 +932,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
cfg->irptndx = INVALID_IRPTNDX;
- goto out_free_context;
}
- smmu_domain->smmu = smmu;
- arm_smmu_init_context_bank(smmu_domain);
return 0;
-out_free_context:
- __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+out_unlock:
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
return ret;
}
@@ -975,7 +997,6 @@ static void arm_smmu_free_ptes(pmd_t *pmd)
{
pgtable_t table = pmd_pgtable(*pmd);
- pgtable_page_dtor(table);
__free_page(table);
}
@@ -1108,6 +1129,9 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
struct arm_smmu_smr *smrs = cfg->smrs;
+ if (!smrs)
+ return;
+
/* Invalidate the SMRs before freeing back to the allocator */
for (i = 0; i < cfg->num_streamids; ++i) {
u8 idx = smrs[i].idx;
@@ -1120,20 +1144,6 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
kfree(smrs);
}
-static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
- struct arm_smmu_master_cfg *cfg)
-{
- int i;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
- for (i = 0; i < cfg->num_streamids; ++i) {
- u16 sid = cfg->streamids[i];
-
- writel_relaxed(S2CR_TYPE_BYPASS,
- gr0_base + ARM_SMMU_GR0_S2CR(sid));
- }
-}
-
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_master_cfg *cfg)
{
@@ -1160,23 +1170,30 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_master_cfg *cfg)
{
+ int i;
struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
/*
* We *must* clear the S2CR first, because freeing the SMR means
* that it can be re-allocated immediately.
*/
- arm_smmu_bypass_stream_mapping(smmu, cfg);
+ for (i = 0; i < cfg->num_streamids; ++i) {
+ u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+
+ writel_relaxed(S2CR_TYPE_BYPASS,
+ gr0_base + ARM_SMMU_GR0_S2CR(idx));
+ }
+
arm_smmu_master_free_smrs(smmu, cfg);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- int ret = -EINVAL;
+ int ret;
struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_device *smmu;
+ struct arm_smmu_device *smmu, *dom_smmu;
struct arm_smmu_master_cfg *cfg;
- unsigned long flags;
smmu = dev_get_master_dev(dev)->archdata.iommu;
if (!smmu) {
@@ -1188,20 +1205,22 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* Sanity check the domain. We don't support domains across
* different SMMUs.
*/
- spin_lock_irqsave(&smmu_domain->lock, flags);
- if (!smmu_domain->smmu) {
+ dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
+ if (!dom_smmu) {
/* Now that we have a master, we can finalise the domain */
ret = arm_smmu_init_domain_context(domain, smmu);
if (IS_ERR_VALUE(ret))
- goto err_unlock;
- } else if (smmu_domain->smmu != smmu) {
+ return ret;
+
+ dom_smmu = smmu_domain->smmu;
+ }
+
+ if (dom_smmu != smmu) {
dev_err(dev,
"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
- dev_name(smmu_domain->smmu->dev),
- dev_name(smmu->dev));
- goto err_unlock;
+ dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
+ return -EINVAL;
}
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
/* Looks ok, so add the device to the domain */
cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
@@ -1209,10 +1228,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -ENODEV;
return arm_smmu_domain_add_master(smmu_domain, cfg);
-
-err_unlock:
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
- return ret;
}
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1247,10 +1262,6 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
return -ENOMEM;
arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
- if (!pgtable_page_ctor(table)) {
- __free_page(table);
- return -ENOMEM;
- }
pmd_populate(NULL, pmd, table);
arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
}
@@ -1626,7 +1637,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Mark all SMRn as invalid and all S2CRn as bypass */
for (i = 0; i < smmu->num_mapping_groups; ++i) {
- writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
+ writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
writel_relaxed(S2CR_TYPE_BYPASS,
gr0_base + ARM_SMMU_GR0_S2CR(i));
}
@@ -1761,6 +1772,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
dev_notice(smmu->dev,
"\tstream matching with %u register groups, mask 0x%x",
smmu->num_mapping_groups, mask);
+ } else {
+ smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
+ ID0_NUMSIDB_MASK;
}
/* ID1 */
@@ -1794,11 +1808,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
* Stage-1 output limited by stage-2 input size due to pgd
* allocation (PTRS_PER_PGD).
*/
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
#ifdef CONFIG_64BIT
- smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
+ smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
#else
- smmu->s1_output_size = min(32UL, size);
+ smmu->s1_output_size = min(32UL, size);
#endif
+ } else {
+ smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT,
+ size);
+ }
/* The stage-2 output mask is also applied for bypass */
size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
@@ -1889,6 +1908,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
smmu->irqs[i] = irq;
}
+ err = arm_smmu_device_cfg_probe(smmu);
+ if (err)
+ return err;
+
i = 0;
smmu->masters = RB_ROOT;
while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
@@ -1905,10 +1928,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
dev_notice(dev, "registered %d master devices\n", i);
- err = arm_smmu_device_cfg_probe(smmu);
- if (err)
- goto out_put_masters;
-
parse_driver_options(smmu);
if (smmu->version > 1 &&
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 60ab474bfff3..06d268abe951 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -678,8 +678,7 @@ static int __init dmar_acpi_dev_scope_init(void)
andd->device_name);
continue;
}
- acpi_bus_get_device(h, &adev);
- if (!adev) {
+ if (acpi_bus_get_device(h, &adev)) {
pr_err("Failed to get device for ACPI object %s\n",
andd->device_name);
continue;
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 61d1dafa242d..56feed7cec15 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -984,7 +984,7 @@ static int fsl_pamu_add_device(struct device *dev)
struct iommu_group *group = ERR_PTR(-ENODEV);
struct pci_dev *pdev;
const u32 *prop;
- int ret, len;
+ int ret = 0, len;
/*
* For platform devices we allocate a separate group for
@@ -1007,7 +1007,13 @@ static int fsl_pamu_add_device(struct device *dev)
if (IS_ERR(group))
return PTR_ERR(group);
- ret = iommu_group_add_device(group, dev);
+ /*
+ * Check if device has already been added to an iommu group.
+ * Group could have already been created for a PCI device in
+ * the iommu_group_get_for_dev path.
+ */
+ if (!dev->iommu_group)
+ ret = iommu_group_add_device(group, dev);
iommu_group_put(group);
return ret;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ac4adb337038..0639b9274b11 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -678,15 +678,17 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
*/
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
- struct iommu_group *group = ERR_PTR(-EIO);
+ struct iommu_group *group;
int ret;
group = iommu_group_get(dev);
if (group)
return group;
- if (dev_is_pci(dev))
- group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
+ if (!dev_is_pci(dev))
+ return ERR_PTR(-EINVAL);
+
+ group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
if (IS_ERR(group))
return group;
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index f8636a650cf6..5945223b73fa 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 85c2985d8bcb..bbbaf5de65d2 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -220,7 +220,7 @@ static int __init crossbar_of_init(struct device_node *node)
of_property_read_u32_index(node,
"ti,irqs-reserved",
i, &entry);
- if (entry > max) {
+ if (entry >= max) {
pr_err("Invalid reserved entry\n");
ret = -EINVAL;
goto err_irq_map;
@@ -238,7 +238,7 @@ static int __init crossbar_of_init(struct device_node *node)
of_property_read_u32_index(node,
"ti,irqs-skip",
i, &entry);
- if (entry > max) {
+ if (entry >= max) {
pr_err("Invalid skip entry\n");
ret = -EINVAL;
goto err_irq_map;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 57eaa5a0b1e3..a0698b4f0303 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -36,7 +36,7 @@
struct gic_chip_data {
void __iomem *dist_base;
void __iomem **redist_base;
- void __percpu __iomem **rdist;
+ void __iomem * __percpu *rdist;
struct irq_domain *domain;
u64 redist_stride;
u32 redist_regions;
@@ -104,7 +104,7 @@ static void gic_redist_wait_for_rwp(void)
}
/* Low level accessors */
-static u64 gic_read_iar(void)
+static u64 __maybe_unused gic_read_iar(void)
{
u64 irqstat;
@@ -112,24 +112,24 @@ static u64 gic_read_iar(void)
return irqstat;
}
-static void gic_write_pmr(u64 val)
+static void __maybe_unused gic_write_pmr(u64 val)
{
asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}
-static void gic_write_ctlr(u64 val)
+static void __maybe_unused gic_write_ctlr(u64 val)
{
asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
isb();
}
-static void gic_write_grpen1(u64 val)
+static void __maybe_unused gic_write_grpen1(u64 val)
{
asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
isb();
}
-static void gic_write_sgi1r(u64 val)
+static void __maybe_unused gic_write_sgi1r(u64 val)
{
asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}
@@ -200,19 +200,6 @@ static void gic_poke_irq(struct irq_data *d, u32 offset)
rwp_wait();
}
-static int gic_peek_irq(struct irq_data *d, u32 offset)
-{
- u32 mask = 1 << (gic_irq(d) % 32);
- void __iomem *base;
-
- if (gic_irq_in_rdist(d))
- base = gic_data_rdist_sgi_base();
- else
- base = gic_data.dist_base;
-
- return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
-}
-
static void gic_mask_irq(struct irq_data *d)
{
gic_poke_irq(d, GICD_ICENABLER);
@@ -401,6 +388,19 @@ static void gic_cpu_init(void)
}
#ifdef CONFIG_SMP
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+ u32 mask = 1 << (gic_irq(d) % 32);
+ void __iomem *base;
+
+ if (gic_irq_in_rdist(d))
+ base = gic_data_rdist_sgi_base();
+ else
+ base = gic_data.dist_base;
+
+ return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
static int gic_secondary_init(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4b959e606fe8..dda6dbc23565 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -867,7 +867,7 @@ static int gic_routable_irq_domain_xlate(struct irq_domain *d,
return 0;
}
-const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
+static const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
.map = gic_routable_irq_domain_map,
.unmap = gic_routable_irq_domain_unmap,
.xlate = gic_routable_irq_domain_xlate,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1af40ee209e2..7130505c2425 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -895,8 +895,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
struct cache *cache = mg->cache;
if (mg->writeback) {
- cell_defer(cache, mg->old_ocell, false);
clear_dirty(cache, mg->old_oblock, mg->cblock);
+ cell_defer(cache, mg->old_ocell, false);
cleanup_migration(mg);
return;
@@ -951,13 +951,13 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
}
} else {
+ clear_dirty(cache, mg->new_oblock, mg->cblock);
if (mg->requeue_holder)
cell_defer(cache, mg->new_ocell, true);
else {
bio_endio(mg->new_ocell->holder, 0);
cell_defer(cache, mg->new_ocell, false);
}
- clear_dirty(cache, mg->new_oblock, mg->cblock);
cleanup_migration(mg);
}
}
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 7ffdb589841e..7e1efd5f58f0 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -79,6 +79,11 @@ static void firmware_load(const struct firmware *fw, void *context)
u32 jedec_id;
u32 status;
+ if (fw == NULL) {
+ dev_err(&spi->dev, "Cannot load firmware, aborting\n");
+ return;
+ }
+
if (fw->size == 0) {
dev_err(&spi->dev, "Error: Firmware size is 0!\n");
return;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 059c7414e303..3fe45c705933 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2177,10 +2177,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
vp->tx_ring[entry].frag[i+1].addr =
- cpu_to_le32(pci_map_single(
- VORTEX_PCI(vp),
- (void *)skb_frag_address(frag),
- skb_frag_size(frag), PCI_DMA_TODEVICE));
+ cpu_to_le32(skb_frag_dma_map(
+ &VORTEX_PCI(vp)->dev,
+ frag,
+ frag->page_offset, frag->size, DMA_TO_DEVICE));
if (i == skb_shinfo(skb)->nr_frags-1)
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 23578dfee249..3005155e412b 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth)
GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}
+static inline void greth_enable_tx_and_irq(struct greth_private *greth)
+{
+ wmb(); /* BDs must have been written to memory before enabling TX */
+ GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
+}
+
static inline void greth_disable_tx(struct greth_private *greth)
{
GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
@@ -447,29 +453,30 @@ out:
return err;
}
+static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
+{
+ if (tx_next < tx_last)
+ return (tx_last - tx_next) - 1;
+ else
+ return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
+}
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
- u32 status = 0, dma_addr, ctrl;
+ u32 status, dma_addr;
int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
unsigned long flags;
+ u16 tx_last;
nr_frags = skb_shinfo(skb)->nr_frags;
+ tx_last = greth->tx_last;
+ rmb(); /* tx_last is updated by the poll task */
- /* Clean TX Ring */
- greth_clean_tx_gbit(dev);
-
- if (greth->tx_free < nr_frags + 1) {
- spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
- ctrl = GRETH_REGLOAD(greth->regs->control);
- /* Enable TX IRQ only if not already in poll() routine */
- if (ctrl & GRETH_RXI)
- GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+ if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
netif_stop_queue(dev);
- spin_unlock_irqrestore(&greth->devlock, flags);
err = NETDEV_TX_BUSY;
goto out;
}
@@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
/* Linear buf */
if (nr_frags != 0)
status = GRETH_TXBD_MORE;
+ else
+ status = GRETH_BD_IE;
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= GRETH_TXBD_CSALL;
@@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
/* Enable the descriptor chain by enabling the first descriptor */
bdp = greth->tx_bd_base + greth->tx_next;
- greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
- greth->tx_next = curr_tx;
- greth->tx_free -= nr_frags + 1;
-
- wmb();
+ greth_write_bd(&bdp->stat,
+ greth_read_bd(&bdp->stat) | GRETH_BD_EN);
spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
- greth_enable_tx(greth);
+ greth->tx_next = curr_tx;
+ greth_enable_tx_and_irq(greth);
spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_OK;
@@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev)
if (greth->tx_free > 0) {
netif_wake_queue(dev);
}
-
}
static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
@@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev)
{
struct greth_private *greth;
struct greth_bd *bdp, *bdp_last_frag;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
u32 stat;
int nr_frags, i;
+ u16 tx_last;
greth = netdev_priv(dev);
+ tx_last = greth->tx_last;
- while (greth->tx_free < GRETH_TXBD_NUM) {
+ while (tx_last != greth->tx_next) {
- skb = greth->tx_skbuff[greth->tx_last];
+ skb = greth->tx_skbuff[tx_last];
nr_frags = skb_shinfo(skb)->nr_frags;
/* We only clean fully completed SKBs */
- bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+ bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);
GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
mb();
@@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev)
if (stat & GRETH_BD_EN)
break;
- greth->tx_skbuff[greth->tx_last] = NULL;
+ greth->tx_skbuff[tx_last] = NULL;
greth_update_tx_stats(dev, stat);
dev->stats.tx_bytes += skb->len;
- bdp = greth->tx_bd_base + greth->tx_last;
+ bdp = greth->tx_bd_base + tx_last;
- greth->tx_last = NEXT_TX(greth->tx_last);
+ tx_last = NEXT_TX(tx_last);
dma_unmap_single(greth->dev,
greth_read_bd(&bdp->addr),
@@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev)
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- bdp = greth->tx_bd_base + greth->tx_last;
+ bdp = greth->tx_bd_base + tx_last;
dma_unmap_page(greth->dev,
greth_read_bd(&bdp->addr),
skb_frag_size(frag),
DMA_TO_DEVICE);
- greth->tx_last = NEXT_TX(greth->tx_last);
+ tx_last = NEXT_TX(tx_last);
}
- greth->tx_free += nr_frags+1;
dev_kfree_skb(skb);
}
+ if (skb) { /* skb is set only if the above while loop was entered */
+ wmb();
+ greth->tx_last = tx_last;
- if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
- netif_wake_queue(dev);
+ if (netif_queue_stopped(dev) &&
+ (greth_num_free_bds(tx_last, greth->tx_next) >
+ (MAX_SKB_FRAGS+1)))
+ netif_wake_queue(dev);
+ }
}
static int greth_rx(struct net_device *dev, int limit)
@@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget)
greth = container_of(napi, struct greth_private, napi);
restart_txrx_poll:
- if (netif_queue_stopped(greth->netdev)) {
- if (greth->gbit_mac)
- greth_clean_tx_gbit(greth->netdev);
- else
- greth_clean_tx(greth->netdev);
- }
-
if (greth->gbit_mac) {
+ greth_clean_tx_gbit(greth->netdev);
work_done += greth_rx_gbit(greth->netdev, budget - work_done);
} else {
+ if (netif_queue_stopped(greth->netdev))
+ greth_clean_tx(greth->netdev);
work_done += greth_rx(greth->netdev, budget - work_done);
}
@@ -983,7 +992,8 @@ restart_txrx_poll:
spin_lock_irqsave(&greth->devlock, flags);
ctrl = GRETH_REGLOAD(greth->regs->control);
- if (netif_queue_stopped(greth->netdev)) {
+ if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
+ (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
GRETH_REGSAVE(greth->regs->control,
ctrl | GRETH_TXI | GRETH_RXI);
mask = GRETH_INT_RX | GRETH_INT_RE |
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h
index 232a622a85b7..ae16ac94daf8 100644
--- a/drivers/net/ethernet/aeroflex/greth.h
+++ b/drivers/net/ethernet/aeroflex/greth.h
@@ -107,7 +107,7 @@ struct greth_private {
u16 tx_next;
u16 tx_last;
- u16 tx_free;
+ u16 tx_free; /* only used on 10/100Mbit */
u16 rx_cur;
struct greth_regs *regs; /* Address of controller registers. */
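In the gigabit TX path above, free descriptors are now tracked purely through the tx_next/tx_last ring indices (tx_free remains only for the 10/100 path), and greth_clean_tx_gbit() walks a local copy of tx_last, unmaps every completed buffer, and only then publishes the new index behind a wmb(). A minimal, driver-agnostic sketch of that reclaim pattern follows; RING_SIZE, the free-slot formula and the callback names are placeholders, not GRETH symbols, and the exact arithmetic of greth_num_free_bds() is not shown in this hunk.

/* Illustrative ring-reclaim pattern; all names here are placeholders. */
#define RING_SIZE 128u                          /* power of two */

struct tx_ring {
	unsigned int next;                      /* producer (xmit) index */
	unsigned int last;                      /* consumer (clean) index */
	void *buf[RING_SIZE];
};

/* Slots the producer may still claim without catching up to the consumer. */
static unsigned int ring_num_free(unsigned int last, unsigned int next)
{
	return (last - next - 1) & (RING_SIZE - 1);
}

static void ring_clean(struct tx_ring *r,
		       int (*completed)(unsigned int idx),
		       void (*release)(void *buf))
{
	unsigned int last = r->last;            /* work on a local snapshot */

	while (last != r->next && completed(last)) {
		release(r->buf[last]);
		r->buf[last] = NULL;
		last = (last + 1) & (RING_SIZE - 1);
	}

	/* Publish only after every buffer is released; the driver uses wmb(). */
	r->last = last;
}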
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 346592dca33c..a3c11355a34d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
struct xgbe_prv_data *pdata = filp->private_data;
unsigned int value;
- value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
- pdata->debugfs_xpcs_reg);
+ value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg);
return xgbe_common_read(buffer, count, ppos, value);
}
@@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
if (len < 0)
return len;
- pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
- pdata->debugfs_xpcs_reg, value);
+ XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+ value);
return len;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index edaca4496264..ea273836d999 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
/* Clear MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
/* Set MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
/* Enable all counter interrupts */
- XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
- XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}
static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
@@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
unsigned int i, count;
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+ return 0;
+
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
@@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}
-static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
- unsigned char queue_count)
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+ unsigned int queue_count)
{
unsigned int q_fifo_size = 0;
enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
@@ -1748,6 +1751,10 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
q_fifo_size = XGBE_FIFO_SIZE_KB(256);
break;
}
+
+ /* The configured value is not the actual amount of fifo RAM */
+ q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+
q_fifo_size = q_fifo_size / queue_count;
/* Set the queue fifo size programmable value */
@@ -1947,6 +1954,32 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
xgbe_disable_rx_vlan_stripping(pdata);
}
+static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+{
+ bool read_hi;
+ u64 val;
+
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+ }
+
+ val = XGMAC_IOREAD(pdata, reg_lo);
+
+ if (read_hi)
+ val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+ return val;
+}
+
static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
@@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
stats->txoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
stats->txframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
stats->txmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
stats->tx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
stats->tx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
stats->tx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
stats->tx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
stats->tx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
stats->tx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
stats->txunicastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
stats->txmulticastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
stats->txunderflowerror +=
- XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
stats->txoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
stats->txframecount_g +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
stats->txpauseframes +=
- XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
stats->txvlanframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}
static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
@@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
stats->rxframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
stats->rxoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
stats->rxoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
stats->rxbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
stats->rxmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
stats->rxcrcerror +=
- XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
stats->rxrunterror +=
- XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
stats->rxjabbererror +=
- XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
stats->rxundersize_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
stats->rxoversize_g +=
- XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
stats->rx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
stats->rx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
stats->rx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
stats->rx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
stats->rx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
stats->rx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
stats->rxunicastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
stats->rxlengtherror +=
- XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
stats->rxoutofrangetype +=
- XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
stats->rxpauseframes +=
- XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
stats->rxfifooverflow +=
- XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
stats->rxvlanframes_gb +=
- XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
stats->rxwatchdogerror +=
- XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
@@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
stats->txoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
stats->txframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
stats->txmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
stats->tx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
stats->tx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
stats->tx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
stats->tx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
stats->tx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
stats->tx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
stats->txunicastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
stats->txmulticastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
stats->txunderflowerror +=
- XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
stats->txoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
stats->txframecount_g +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
stats->txpauseframes +=
- XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
stats->txvlanframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
stats->rxframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
stats->rxoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
stats->rxoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
stats->rxbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
stats->rxmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
stats->rxcrcerror +=
- XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
stats->rxrunterror +=
- XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
stats->rxjabbererror +=
- XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
stats->rxundersize_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
stats->rxoversize_g +=
- XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
stats->rx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
stats->rx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
stats->rx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
stats->rx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
stats->rx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
stats->rx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
stats->rxunicastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
stats->rxlengtherror +=
- XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
stats->rxoutofrangetype +=
- XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
stats->rxpauseframes +=
- XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
stats->rxfifooverflow +=
- XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
stats->rxvlanframes_gb +=
- XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
stats->rxwatchdogerror +=
- XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
/* Un-freeze counters */
XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
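xgbe_mmc_read() above widens only the octet counters to 64 bits: for those registers the low word at reg_lo is paired with a second read at reg_lo + 4 for the high word, while every other MMC counter stays a single 32-bit read. A standalone sketch of that pattern, with a hypothetical mmio_read32() standing in for XGMAC_IOREAD():

#include <stdint.h>
#include <stdbool.h>

extern uint32_t mmio_read32(unsigned int reg);  /* assumed MMIO accessor */

uint64_t mmc_read_counter(unsigned int reg_lo, bool reg_is_64bit)
{
	uint64_t val = mmio_read32(reg_lo);

	if (reg_is_64bit)                       /* e.g. the *OCTETCOUNT* registers */
		val |= (uint64_t)mmio_read32(reg_lo + 4) << 32;

	return val;
}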
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index dc84f7193c2d..b26d75856553 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -361,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
memset(hw_feat, 0, sizeof(*hw_feat));
+ hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
/* Hardware feature register 0 */
hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index a076aca138a1..46f613028e9c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
- XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
- XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
- XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
drvinfo->n_stats = XGBE_STATS_COUNT;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 8aa6a9353f7b..bdf9cfa70e88 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
}
if (i < pdata->rx_ring_count) {
- spin_lock_init(&tx_ring->lock);
+ spin_lock_init(&rx_ring->lock);
channel->rx_ring = rx_ring++;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 07bf70a82908..e9fe6e6ddcc3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -183,6 +183,7 @@
#define XGMAC_DRIVER_CONTEXT 1
#define XGMAC_IOCTL_CONTEXT 2
+#define XGBE_FIFO_MAX 81920
#define XGBE_FIFO_SIZE_B(x) (x)
#define XGBE_FIFO_SIZE_KB(x) (x * 1024)
@@ -526,6 +527,9 @@ struct xgbe_desc_if {
* or configurations are present in the device.
*/
struct xgbe_hw_features {
+ /* HW Version */
+ unsigned int version;
+
/* HW Feature Register0 */
unsigned int gmii; /* 1000 Mbps support */
unsigned int vlhash; /* VLAN Hash Filter */
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index 616dff6d3f5f..f4054d242f3c 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,5 +1,6 @@
config NET_XGENE
tristate "APM X-Gene SoC Ethernet Driver"
+ depends on HAS_DMA
select PHYLIB
help
This is the Ethernet driver for the on-chip ethernet interface on the
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 7dcfb19a31c8..d8d07a818b89 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -84,7 +84,7 @@ config BNX2
config CNIC
tristate "QLogic CNIC support"
- depends on PCI
+ depends on PCI && (IPV6 || IPV6=n)
select BNX2
select UIO
---help---
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5ba8af50c84f..c4daa068f1db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2233,7 +2233,12 @@ struct shmem2_region {
u32 reserved3; /* Offset 0x14C */
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
- #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
+ #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001
+ #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00
+ #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8
+ #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000
+ #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000
+ #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000
u32 reserved5[2];
u32 reserved6[PORT_MAX];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 53fb4fa61b40..549549eaf580 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -154,15 +154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
+ #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
#define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
-#define SFP_EEPROM_COMP_CODE_ADDR 0x3
- #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
- #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
- #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
+#define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3
+ #define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4)
+ #define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5)
+ #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6)
+
+#define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6
+ #define SFP_EEPROM_1G_COMP_CODE_SX (1<<0)
+ #define SFP_EEPROM_1G_COMP_CODE_LX (1<<1)
+ #define SFP_EEPROM_1G_COMP_CODE_CX (1<<2)
+ #define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3)
#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
@@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
reg_set[i].val);
/* Start KR2 work-around timer which handles BCM8073 link-partner */
- vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
- bnx2x_update_link_attr(params, vars->link_attr_sync);
+ params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
}
static void bnx2x_disable_kr2(struct link_params *params,
@@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct link_params *params,
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
- bnx2x_update_link_attr(params, vars->link_attr_sync);
+ params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
}
@@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params,
~FEATURE_CONFIG_PFC_ENABLED;
if (SHMEM2_HAS(bp, link_attr_sync))
- vars->link_attr_sync = SHMEM2_RD(bp,
+ params->link_attr_sync = SHMEM2_RD(bp,
link_attr_sync[params->port]);
DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
@@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u32 sync_offset = 0, phy_idx, media_types;
- u8 gport, val[2], check_limiting_mode = 0;
+ u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0;
*edc_mode = EDC_MODE_LIMITING;
phy->media_type = ETH_PHY_UNSPECIFIED;
/* First check for copper cable */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
I2C_DEV_ADDR_A0,
- SFP_EEPROM_CON_TYPE_ADDR,
- 2,
+ 0,
+ SFP_EEPROM_FC_TX_TECH_ADDR + 1,
(u8 *)val) != 0) {
DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
return -EINVAL;
}
-
- switch (val[0]) {
+ params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
+ params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] <<
+ LINK_SFP_EEPROM_COMP_CODE_SHIFT;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
+ switch (val[SFP_EEPROM_CON_TYPE_ADDR]) {
case SFP_EEPROM_CON_TYPE_VAL_COPPER:
{
u8 copper_module_type;
@@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
/* Check if it's an active cable (includes SFP+ module)
* or a passive cable
*/
- if (bnx2x_read_sfp_module_eeprom(phy,
- params,
- I2C_DEV_ADDR_A0,
- SFP_EEPROM_FC_TX_TECH_ADDR,
- 1,
- &copper_module_type) != 0) {
- DP(NETIF_MSG_LINK,
- "Failed to read copper-cable-type"
- " from SFP+ EEPROM\n");
- return -EINVAL;
- }
+ copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR];
if (copper_module_type &
SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
@@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
}
break;
}
+ case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
case SFP_EEPROM_CON_TYPE_VAL_LC:
case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
- if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
- SFP_EEPROM_COMP_CODE_LR_MASK |
- SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+ if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
+ (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
+ SFP_EEPROM_10G_COMP_CODE_LR_MASK |
+ SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) {
DP(NETIF_MSG_LINK, "1G SFP module detected\n");
- gport = params->port;
phy->media_type = ETH_PHY_SFP_1G_FIBER;
if (phy->req_line_speed != SPEED_1000) {
+ u8 gport = params->port;
phy->req_line_speed = SPEED_1000;
if (!CHIP_IS_E1x(bp)) {
gport = BP_PATH(bp) +
@@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
"Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
gport);
}
+ if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] &
+ SFP_EEPROM_1G_COMP_CODE_BASE_T) {
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ msleep(40);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ }
} else {
int idx, cfg_idx = 0;
DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
default:
DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
- val[0]);
+ val[SFP_EEPROM_CON_TYPE_ADDR]);
return -EINVAL;
}
sync_offset = params->shmem_base +
@@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
sigdet = bnx2x_warpcore_get_sigdet(phy, params);
if (!sigdet) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
DP(NETIF_MSG_LINK, "No sigdet\n");
}
@@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* CL73 has not begun yet */
if (base_page == 0) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
DP(NETIF_MSG_LINK, "No BP\n");
}
@@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
if (!not_kr2_device) {
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
next_page);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 389f5f8cb0a3..d9cce4c3899b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -323,6 +323,9 @@ struct link_params {
#define LINK_FLAGS_INT_DISABLED (1<<0)
#define PHY_INITIALIZED (1<<1)
u32 lfa_base;
+
+ /* The same definitions as the shmem2 parameter */
+ u32 link_attr_sync;
};
/* Output parameters */
@@ -364,8 +367,6 @@ struct link_vars {
u8 rx_tx_asic_rst;
u8 turn_to_run_wc_rt;
u16 rsrv2;
- /* The same definitions as the shmem2 parameter */
- u32 link_attr_sync;
};
/***********************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 900cab420810..d1c093dcb054 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp)
bnx2x_release_phy_lock(bp);
}
+static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
+{
+ REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
+
+ /* make sure this value is 0 */
+ REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+ REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
+}
+
+static void bnx2x_set_endianity(struct bnx2x *bp)
+{
+#ifdef __BIG_ENDIAN
+ bnx2x_config_endianity(bp, 1);
+#else
+ bnx2x_config_endianity(bp, 0);
+#endif
+}
+
+static void bnx2x_reset_endianity(struct bnx2x *bp)
+{
+ bnx2x_config_endianity(bp, 0);
+}
+
/**
* bnx2x_init_hw_common - initialize the HW at the COMMON phase.
*
@@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
bnx2x_init_pxp(bp);
-
-#ifdef __BIG_ENDIAN
- REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
- /* make sure this value is 0 */
- REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
-
-/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
- REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
-#endif
-
+ bnx2x_set_endianity(bp);
bnx2x_ilt_init_page_size(bp, INITOP_SET);
if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
@@ -13169,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev,
bnx2x_iov_remove_one(bp);
/* Power on: we can't let PCI layer write to us while we are in D3 */
- if (IS_PF(bp))
+ if (IS_PF(bp)) {
bnx2x_set_power_state(bp, PCI_D0);
+ /* Set endianity registers to reset values in case the next driver
+ * boots in a different endianity environment.
+ */
+ bnx2x_reset_endianity(bp);
+ }
+
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 27861a6c7ca5..a6a9f284c8dd 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,7 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
@@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct dst_entry **dst)
{
-#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_IPV6)
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3ac5d23454a8..cb77ae93d89a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -11617,6 +11617,12 @@ static int tg3_open(struct net_device *dev)
struct tg3 *tp = netdev_priv(dev);
int err;
+ if (tp->pcierr_recovery) {
+ netdev_err(dev, "Failed to open device. PCI error recovery "
+ "in progress\n");
+ return -EAGAIN;
+ }
+
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
if (tg3_asic_rev(tp) == ASIC_REV_57766) {
@@ -11674,6 +11680,12 @@ static int tg3_close(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
+ if (tp->pcierr_recovery) {
+ netdev_err(dev, "Failed to close device. PCI error recovery "
+ "in progress\n");
+ return -EAGAIN;
+ }
+
tg3_ptp_fini(tp);
tg3_stop(tp);
@@ -17561,6 +17573,7 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
tp->irq_sync = 1;
+ tp->pcierr_recovery = false;
if (tg3_debug > 0)
tp->msg_enable = tg3_debug;
@@ -18071,6 +18084,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
+ tp->pcierr_recovery = true;
+
/* We probably don't have netdev yet */
if (!netdev || !netif_running(netdev))
goto done;
@@ -18195,6 +18210,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_phy_start(tp);
done:
+ tp->pcierr_recovery = false;
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 461accaf0aa4..31c9f8295953 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3407,6 +3407,7 @@ struct tg3 {
struct device *hwmon_dev;
bool link_up;
+ bool pcierr_recovery;
};
/* Accessor macros for chip and asic attributes
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ff8cae5e2535..ffc92a41d75b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
* For TSO, the TCP checksum field is seeded with pseudo-header sum
* excluding the length field.
*/
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
/* Do we really need these? */
@@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ __be16 net_proto = vlan_get_protocol(skb);
u8 proto = 0;
- if (skb->protocol == htons(ETH_P_IP))
+ if (net_proto == htons(ETH_P_IP))
proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
- else if (skb->protocol == htons(ETH_P_IPV6)) {
+ else if (net_proto == htons(ETH_P_IPV6)) {
/* nexthdr may not be TCP immediately. */
proto = ipv6_hdr(skb)->nexthdr;
}
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
index 184a063bed5f..07d2201530d2 100644
--- a/drivers/net/ethernet/calxeda/Kconfig
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -1,6 +1,7 @@
config NET_CALXEDA_XGMAC
tristate "Calxeda 1G/10G XGMAC Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
+ depends on ARCH_HIGHBANK || COMPILE_TEST
select CRC32
help
This is the driver for the XGMAC Ethernet IP block found on Calxeda
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 18fb9c61d7ba..8c34811a1128 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1253,7 +1253,9 @@ freeout: t4_free_sge_resources(adap);
goto freeout;
}
- t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
+ t4_write_reg(adap, is_t4(adap->params.chip) ?
+ MPS_TRC_RSS_CONTROL :
+ MPS_T5_TRC_RSS_CONTROL,
RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
return 0;
@@ -1761,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0xea7c,
- 0xf000, 0x11190,
+ 0xf000, 0x11110,
+ 0x11118, 0x11190,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
@@ -1968,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0x11088,
- 0x1109c, 0x1117c,
+ 0x1109c, 0x11110,
+ 0x11118, 0x1117c,
0x11190, 0x11204,
0x19040, 0x1906c,
0x19078, 0x19080,
@@ -5955,7 +5959,8 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(CQ_END);
params[4] = FW_PARAM_PFVF(OCQ_START);
params[5] = FW_PARAM_PFVF(OCQ_END);
- ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+ val);
if (ret < 0)
goto bye;
adap->vres.qp.start = val[0];
@@ -5967,7 +5972,8 @@ static int adap_init0(struct adapter *adap)
params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
- ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+ val);
if (ret < 0) {
adap->params.max_ordird_qp = 8;
adap->params.max_ird_adapter = 32 * adap->tids.ntids;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a853133d8db8..41d04462b72e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -168,6 +168,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
}
/*
+ * t4_report_fw_error - report firmware error
+ * @adap: the adapter
+ *
+ * The adapter firmware can indicate error conditions to the host.
+ * If the firmware has indicated an error, print out the reason for
+ * the firmware error.
+ */
+static void t4_report_fw_error(struct adapter *adap)
+{
+ static const char *const reason[] = {
+ "Crash", /* PCIE_FW_EVAL_CRASH */
+ "During Device Preparation", /* PCIE_FW_EVAL_PREP */
+ "During Device Configuration", /* PCIE_FW_EVAL_CONF */
+ "During Device Initialization", /* PCIE_FW_EVAL_INIT */
+ "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
+ "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
+ "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
+ "Reserved", /* reserved */
+ };
+ u32 pcie_fw;
+
+ pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
+ if (pcie_fw & FW_PCIE_FW_ERR)
+ dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
+ reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
+}
+
+/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
*/
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
@@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
dump_mbox(adap, mbox, data_reg);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
+ t4_report_fw_error(adap);
return -ETIMEDOUT;
}
@@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
+#define CHELSIO_VPD_UNIQUE_ID 0x82
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
if (ret < 0)
goto out;
- addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+ /* The VPD shall begin with a unique identifier specified by the PCI SIG.
+ * For Chelsio adapters that first byte is CHELSIO_VPD_UNIQUE_ID (0x82);
+ * the VPD programming software is expected to place this entry at the
+ * beginning of the VPD.
+ */
+ addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
if (ret < 0)
@@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
+ i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->pn, vpd + pn, min(i, PN_LEN));
strim(p->pn);
@@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter)
int fat;
- fat = t4_handle_intr_status(adapter,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
- sysbus_intr_info) +
- t4_handle_intr_status(adapter,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
- pcie_port_intr_info) +
- t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
- is_t4(adapter->params.chip) ?
- pcie_intr_info : t5_pcie_intr_info);
+ if (is_t4(adapter->params.chip))
+ fat = t4_handle_intr_status(adapter,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ t4_handle_intr_status(adapter,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ pcie_intr_info);
+ else
+ fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ t5_pcie_intr_info);
if (fat)
t4_fatal_err(adapter);
@@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter)
int fat;
+ if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+ t4_report_fw_error(adapter);
+
fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
cim_intr_info) +
t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
@@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap)
{
u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
- if (status & MEM_PERR_INT_CAUSE)
+ if (status & MEM_PERR_INT_CAUSE) {
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+ if (is_t5(adap->params.chip))
+ dev_alert(adap->pdev_dev,
+ "MA parity error, parity status %#x\n",
+ t4_read_reg(adap,
+ MA_PARITY_ERROR_STATUS2));
+ }
if (status & MEM_WRAP_INT_CAUSE) {
v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
dev_alert(adap->pdev_dev, "MA address wrap-around error by "
@@ -2733,12 +2783,16 @@ retry:
/*
* Issue the HELLO command to the firmware. If it's not successful
* but indicates that we got a "busy" or "timeout" condition, retry
- * the HELLO until we exhaust our retry limit.
+ * the HELLO until we exhaust our retry limit. If we do exceed our
+ * retry limit, check to see if the firmware left us any error
+ * information and report that if so.
*/
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret < 0) {
if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
goto retry;
+ if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+ t4_report_fw_error(adap);
return ret;
}
@@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
lc->link_ok = link_ok;
lc->speed = speed;
lc->fc = fc;
+ lc->supported = be16_to_cpu(p->u.info.pcap);
t4_os_link_changed(adap, port, link_ok);
}
if (mod != pi->mod_type) {
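t4_report_fw_error() above decodes the 3-bit evaluation code the firmware leaves in the PCIE_FW register, using the FW_PCIE_FW_EVAL_* shift and mask added to t4fw_api.h further down in this patch. A standalone sketch of that decode; the position of the error-valid flag is an assumption here, since only the EVAL field definitions appear in this diff:

#include <stdint.h>
#include <stdio.h>

#define FW_PCIE_FW_EVAL_SHIFT 24
#define FW_PCIE_FW_EVAL_MASK  0x7
#define FW_PCIE_FW_ERR_BIT    (1u << 31)        /* assumed bit position */

static const char *const fw_eval_reason[] = {
	"Crash", "During Device Preparation", "During Device Configuration",
	"During Device Initialization", "Unexpected Event",
	"Insufficient Airflow", "Device Shutdown", "Reserved",
};

static void report_fw_error(uint32_t pcie_fw)
{
	if (pcie_fw & FW_PCIE_FW_ERR_BIT)
		printf("Firmware reports adapter error: %s\n",
		       fw_eval_reason[(pcie_fw >> FW_PCIE_FW_EVAL_SHIFT) &
				      FW_PCIE_FW_EVAL_MASK]);
}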
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index e3146e83df20..39fb325474f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -511,6 +511,7 @@
#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
#define MA_PCIE_FW 0x30b8
#define MA_PARITY_ERROR_STATUS 0x77f4
+#define MA_PARITY_ERROR_STATUS2 0x7804
#define MA_EXT_MEMORY1_BAR 0x7808
#define EDC_0_BASE_ADDR 0x7900
@@ -959,6 +960,7 @@
#define TRCMULTIFILTER 0x00000001U
#define MPS_TRC_RSS_CONTROL 0x9808
+#define MPS_T5_TRC_RSS_CONTROL 0xa00c
#define RSSCONTROL_MASK 0x00ff0000U
#define RSSCONTROL_SHIFT 16
#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 5f2729ebadbe..3409756a85b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2228,6 +2228,10 @@ struct fw_debug_cmd {
#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
FW_PCIE_FW_MASTER_MASK)
+#define FW_PCIE_FW_EVAL_MASK 0x7
+#define FW_PCIE_FW_EVAL_SHIFT 24
+#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
+ FW_PCIE_FW_EVAL_MASK)
struct fw_hdr {
u8 ver;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index a0b418e007a0..566b17db135a 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
- if (skb->protocol != htons(ETH_P_IP))
+ if (vlan_get_protocol(skb) != htons(ETH_P_IP))
return;
if (skb->ip_summed == CHECKSUM_PARTIAL)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index cbc330b301cd..ad3d5d12173f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2674,7 +2674,8 @@ set_itr_now:
#define E1000_TX_FLAGS_VLAN_SHIFT 16
static int e1000_tso(struct e1000_adapter *adapter,
- struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter,
}
static bool e1000_tx_csum(struct e1000_adapter *adapter,
- struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
if (skb->ip_summed != CHECKSUM_PARTIAL)
return false;
- switch (skb->protocol) {
+ switch (protocol) {
case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
cmd_len |= E1000_TXD_CMD_TCP;
@@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int count = 0;
int tso;
unsigned int f;
+ __be16 protocol = vlan_get_protocol(skb);
/* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively
@@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
first = tx_ring->next_to_use;
- tso = e1000_tso(adapter, tx_ring, skb);
+ tso = e1000_tso(adapter, tx_ring, skb, protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (likely(hw->mac_type != e1000_82544))
tx_ring->last_tx_tso = true;
tx_flags |= E1000_TX_FLAGS_TSO;
- } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+ } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
tx_flags |= E1000_TX_FLAGS_CSUM;
- if (likely(skb->protocol == htons(ETH_P_IP)))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;
if (unlikely(skb->no_fcs))
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 65c3aef2bd36..247335d2c7ec 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5164,7 +5164,8 @@ link_up:
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
-static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
return 1;
}
-static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_context_desc *context_desc;
@@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
unsigned int i;
u8 css;
u32 cmd_len = E1000_TXD_CMD_DEXT;
- __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return false;
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
-
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -5546,6 +5542,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int count = 0;
int tso;
unsigned int f;
+ __be16 protocol = vlan_get_protocol(skb);
if (test_bit(__E1000_DOWN, &adapter->state)) {
dev_kfree_skb_any(skb);
@@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
first = tx_ring->next_to_use;
- tso = e1000_tso(tx_ring, skb);
+ tso = e1000_tso(tx_ring, skb, protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (tso)
tx_flags |= E1000_TX_FLAGS_TSO;
- else if (e1000_tx_csum(tx_ring, skb))
+ else if (e1000_tx_csum(tx_ring, skb, protocol))
tx_flags |= E1000_TX_FLAGS_CSUM;
/* Old method was to assume IPv4 packet by default if TSO was enabled.
* 82571 hardware supports TSO capabilities for IPv6 as well...
* no longer assume, we must.
*/
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;
if (unlikely(skb->no_fcs))
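The e1000/e1000e hunks above, along with the bnad, ehea, i40e, i40evf, mvneta and qlge hunks in this same diff, replace direct skb->protocol checks with vlan_get_protocol(skb): on a VLAN-tagged frame skb->protocol holds the 802.1Q ethertype, while checksum and TSO setup need the encapsulated L3 protocol. The lookup removed from e1000_tx_csum() above amounts to the sketch below; the real vlan_get_protocol() helper also covers tags kept out of band by hardware acceleration, so treat this only as an illustration:

#include <linux/if_vlan.h>

static __be16 l3_protocol(const struct sk_buff *skb)
{
	/* Mirror of the open-coded lookup removed from e1000_tx_csum(). */
	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;

	return skb->protocol;
}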
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a51aa37b7b5a..369848e107f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
goto out_drop;
/* obtain protocol of skb */
- protocol = skb->protocol;
+ protocol = vlan_get_protocol(skb);
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 79bf96ca6489..95a3ec236b49 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
goto out_drop;
/* obtain protocol of skb */
- protocol = skb->protocol;
+ protocol = vlan_get_protocol(skb);
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c9f1d1b7ef37..ade067de1689 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -20,6 +20,7 @@
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
@@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
+ __be16 l3_proto = vlan_get_protocol(skb);
u8 l4_proto;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read the L4 protocol from one of the IPv6 extension headers */
@@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
return MVNETA_TX_L4_CSUM_NOT;
return mvneta_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVNETA_TX_L4_CSUM_NOT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bb536aa613f4..abddcf8c40aa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
int qpn, u64 *reg_id)
{
int err;
- struct mlx4_spec_list spec_eth_outer = { {NULL} };
- struct mlx4_spec_list spec_vxlan = { {NULL} };
- struct mlx4_spec_list spec_eth_inner = { {NULL} };
-
- struct mlx4_net_trans_rule rule = {
- .queue_mode = MLX4_NET_TRANS_Q_FIFO,
- .exclusive = 0,
- .allow_loopback = 1,
- .promisc_mode = MLX4_FS_REGULAR,
- .priority = MLX4_DOMAIN_NIC,
- };
-
- __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return 0; /* do nothing */
- rule.port = priv->port;
- rule.qpn = qpn;
- INIT_LIST_HEAD(&rule.list);
-
- spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
- memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-
- spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
- spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
-
- list_add_tail(&spec_eth_outer.list, &rule.list);
- list_add_tail(&spec_vxlan.list, &rule.list);
- list_add_tail(&spec_eth_inner.list, &rule.list);
-
- err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+ err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
+ MLX4_DOMAIN_NIC, reg_id);
if (err) {
en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index d80e7a6fac74..ca0f98c95105 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+ int port, int qpn, u16 prio, u64 *reg_id)
+{
+ int err;
+ struct mlx4_spec_list spec_eth_outer = { {NULL} };
+ struct mlx4_spec_list spec_vxlan = { {NULL} };
+ struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_REGULAR,
+ };
+
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ rule.port = port;
+ rule.qpn = qpn;
+ rule.priority = prio;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+ memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+ spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
+ spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
+
+ list_add_tail(&spec_eth_outer.list, &rule.list);
+ list_add_tail(&spec_vxlan.list, &rule.list);
+ list_add_tail(&spec_eth_inner.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_tunnel_steer_add);
+
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
u32 max_range_qpn)
{
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 5020fd47825d..2f12c88c66ab 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
int rx_head = priv->rx_head;
int rx = 0;
- while (1) {
+ while (rx < budget) {
desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
desc0 = readl(desc + RX_REG_OFFSET_DESC0);
@@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
net_dbg_ratelimited("packet error\n");
priv->stats.rx_dropped++;
priv->stats.rx_errors++;
- continue;
+ goto rx_next;
}
len = desc0 & RX_DESC0_FRAME_LEN_MASK;
@@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+ dma_sync_single_for_cpu(&ndev->dev,
+ priv->rx_mapping[rx_head],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ skb = netdev_alloc_skb_ip_align(ndev, len);
+
if (unlikely(!skb)) {
- net_dbg_ratelimited("build_skb failed\n");
+ net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
priv->stats.rx_dropped++;
priv->stats.rx_errors++;
+ goto rx_next;
}
+ memcpy(skb->data, priv->rx_buf[rx_head], len);
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(&priv->napi, skb);
@@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (desc0 & RX_DESC0_MULTICAST)
priv->stats.multicast++;
+rx_next:
writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
rx_head = RX_NEXT(rx_head);
priv->rx_head = rx_head;
-
- if (rx >= budget)
- break;
}
if (rx < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_complete(napi);
}
priv->reg_imr |= RPKT_FINISH_M;
@@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
len = ETH_ZLEN;
}
- txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
- txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
- txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
- txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+ dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ priv->tx_buf_size, DMA_TO_DEVICE);
+
+ txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
+ if (tx_head == TX_DESC_NUM_MASK)
+ txdes1 |= TX_DESC1_END;
writel(txdes1, desc + TX_REG_OFFSET_DESC1);
writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
@@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
spin_lock_init(&priv->txlock);
priv->tx_buf_size = TX_BUF_SIZE;
- priv->rx_buf_size = RX_BUF_SIZE +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ priv->rx_buf_size = RX_BUF_SIZE;
priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
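The rx_poll changes above move the driver to the usual NAPI contract: handle at most budget packets per poll, use goto rx_next on bad descriptors so the buffer is still handed back to the DMA engine and the ring pointer advances, and call napi_complete() only when fewer than budget packets were processed. A small self-contained model of that contract; the backlog of 45 packets and the weight of 16 are made-up values:

#include <stdio.h>

static int ring_pending = 45;		/* pretend 45 packets are queued */

static int toy_poll(int budget)
{
	int rx = 0;

	while (rx < budget && ring_pending > 0) {
		ring_pending--;		/* "receive" one packet */
		rx++;
	}
	if (rx < budget)
		printf("  ring empty: napi_complete(), re-enable the IRQ\n");
	return rx;			/* the core keeps polling while rx == budget */
}

int main(void)
{
	int rx;

	do {
		rx = toy_poll(16);	/* NAPI weight of 16 */
		printf("poll returned %d\n", rx);
	} while (rx == 16);
	return 0;
}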
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8706c0dbd0c3..a44a03c45014 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev)
__lpc_eth_clock_enable(pldat, true);
+ /* Suspended PHY makes LPC ethernet core block, so resume now */
+ phy_resume(pldat->phy_dev);
+
/* Reset and initialize */
__lpc_eth_reset(pldat);
__lpc_eth_init(pldat);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 188626e2a861..3e96f269150d 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
if (skb_is_gso(skb)) {
int err;
+ __be16 l3_proto = vlan_get_protocol(skb);
err = skb_cow_head(skb, 0);
if (err < 0)
@@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
<< OB_MAC_TRANSPORT_HDR_SHIFT);
mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
- if (likely(skb->protocol == htons(ETH_P_IP))) {
+ if (likely(l3_proto == htons(ETH_P_IP))) {
struct iphdr *iph = ip_hdr(skb);
iph->check = 0;
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
@@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
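skb->protocol is the outer ethertype, so on a frame that carries a software-inserted 802.1Q header it can read ETH_P_8021Q rather than the L3 protocol the TSO descriptor needs; vlan_get_protocol() returns the encapsulated ethertype in that case. A simplified sketch of the idea; tso_l3_protocol() is an invented name, and the real helper in include/linux/if_vlan.h also accounts for hardware-offloaded tags and uses skb_header_pointer():

static inline __be16 tso_l3_protocol(const struct sk_buff *skb)
{
	/* If an 802.1Q header sits in the frame data, report the protocol
	 * that follows it instead of the VLAN ethertype. */
	if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;

	return skb->protocol;
}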
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 9e757c792d84..196e98a2d93b 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -5,6 +5,7 @@
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
+ depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
select CRC32
select MII
select MDIO_BITBANG
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index c553f6b5a913..cf28daba4346 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -28,7 +28,7 @@
#include "stmmac.h"
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
while (len != 0) {
@@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i),
bmax, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
@@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i), len,
DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
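Every dma_map_single() in this file, and in ring_mode.c and stmmac_main.c further down, now gets the same treatment: test dma_mapping_error() before the address is stored anywhere, and bail out so the caller can drop the skb. The recurring shape, written out as a hypothetical helper; stmmac_map_tx_buf() is an invented name and the patch itself keeps this open-coded:

static int stmmac_map_tx_buf(struct stmmac_priv *priv, int entry,
			     void *buf, size_t len, struct dma_desc *desc)
{
	desc->des2 = dma_map_single(priv->device, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, desc->des2))
		return -1;	/* tell the caller to drop the skb, as jumbo_frm does */

	priv->tx_skbuff_dma[entry].buf = desc->des2;
	return 0;
}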
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index de507c32036c..593e6c4144a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -220,10 +220,10 @@ enum dma_irq_status {
handle_tx = 0x8,
};
-#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1)
-#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2)
-#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3)
-#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4)
+#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
+#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
+#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
+#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
#define CORE_PCS_ANE_COMPLETE (1 << 5)
#define CORE_PCS_LINK_STATUS (1 << 6)
@@ -287,7 +287,7 @@ struct dma_features {
/* Default LPI timers */
#define STMMAC_DEFAULT_LIT_LS 0x3E8
-#define STMMAC_DEFAULT_TWT_LS 0x0
+#define STMMAC_DEFAULT_TWT_LS 0x1E
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
@@ -425,7 +425,7 @@ struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
- unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
int (*set_16kib_bfsize)(int mtu);
void (*init_desc3)(struct dma_desc *p);
void (*refill_desc3) (void *priv, struct dma_desc *p);
@@ -445,6 +445,7 @@ struct mac_device_info {
int multicast_filter_bins;
int unicast_filter_entries;
int mcast_bits_log2;
+ unsigned int rx_csum;
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 71b5419256c1..64d8f56a9c17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -153,7 +153,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
- GMAC_CONTROL_BE)
+ GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index d8ef18786a1c..5efe60ea6526 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
- value |= GMAC_CONTROL_IPC;
+ if (hw->rx_csum)
+ value |= GMAC_CONTROL_IPC;
+ else
+ value &= ~GMAC_CONTROL_IPC;
+
writel(value, ioaddr + GMAC_CONTROL);
value = readl(ioaddr + GMAC_CONTROL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 8607488cbcfc..192c2491330b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -68,7 +68,7 @@ struct stmmac_counters {
unsigned int mmc_rx_octetcount_g;
unsigned int mmc_rx_broadcastframe_g;
unsigned int mmc_rx_multicastframe_g;
- unsigned int mmc_rx_crc_errror;
+ unsigned int mmc_rx_crc_error;
unsigned int mmc_rx_align_error;
unsigned int mmc_rx_run_error;
unsigned int mmc_rx_jabber_error;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 50617c5a0bdb..08c483bd2ec7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
- mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR);
+ mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR);
mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 650a4be6bce5..5dd50c6cda5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -28,7 +28,7 @@
#include "stmmac.h"
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
@@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
STMMAC_RING_MODE);
@@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device, skb->data + bmax,
len, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_RING_MODE);
@@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
STMMAC_RING_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index ca01035634a7..58097c0e2ad5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -34,6 +34,11 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/reset.h>
+struct stmmac_tx_info {
+ dma_addr_t buf;
+ bool map_as_page;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -45,7 +50,7 @@ struct stmmac_priv {
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
- dma_addr_t *tx_skbuff_dma;
+ struct stmmac_tx_info *tx_skbuff_dma;
dma_addr_t dma_tx_phy;
int tx_coalesce;
int hwts_tx_en;
@@ -105,6 +110,8 @@ struct stmmac_priv {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
+ struct clk *clk_ptp_ref;
+ unsigned int clk_ptp_rate;
u32 adv_ts;
int use_riwt;
int irq_wake;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 9af50bae4dde..cf4f38db1c0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_octetcount_g),
STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
- STMMAC_MMC_STAT(mmc_rx_crc_errror),
+ STMMAC_MMC_STAT(mmc_rx_crc_error),
STMMAC_MMC_STAT(mmc_rx_align_error),
STMMAC_MMC_STAT(mmc_rx_run_error),
STMMAC_MMC_STAT(mmc_rx_jabber_error),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 08addd653728..6e6ee226de04 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
+ char *phy_bus_name = priv->plat->phy_bus_name;
bool ret = false;
/* Using PCS we cannot dial with the phy registers at this stage
@@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
(priv->pcs == STMMAC_PCS_RTBI))
goto out;
+ /* Never init EEE if a switch is attached */
+ if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
+ goto out;
+
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
int tx_lpi_timer = priv->tx_lpi_timer;
@@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
priv->hw->mac->set_eee_timer(priv->hw,
STMMAC_DEFAULT_LIT_LS,
tx_lpi_timer);
- } else
- /* Set HW EEE according to the speed */
- priv->hw->mac->set_eee_pls(priv->hw,
- priv->phydev->link);
+ }
+ /* Set HW EEE according to the speed */
+ priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
@@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* calculate default added value:
* formula is :
* addend = (2^32)/freq_div_ratio;
- * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
- * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
- * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
+ * where, freq_div_ratio = clk_ptp_ref_i/50MHz
+ * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
+ * NOTE: clk_ptp_ref_i should be >= 50MHz to
* achive 20ns accuracy.
*
* 2^x * y == (y << x), hence
* 2^32 * 50000000 ==> (50000000 << 32)
*/
temp = (u64) (50000000ULL << 32);
- priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
+ priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
priv->hw->ptp->config_addend(priv->ioaddr,
priv->default_addend);
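The comment above gives addend = 2^32 / (clk_ptp_ref_i / 50MHz) = (50MHz << 32) / clk_ptp_ref_i, which the code evaluates with div_u64(). A standalone check of that arithmetic; the clock rates are arbitrary examples, with 62.5 MHz being the old fixed STMMAC_SYSCLOCK value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rates[] = { 62500000ULL, 50000000ULL, 100000000ULL };
	uint64_t temp = 50000000ULL << 32;	/* 2^32 * 50MHz */

	for (size_t i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		uint32_t addend = (uint32_t)(temp / rates[i]);

		printf("clk_ptp_ref = %9llu Hz, addend = 0x%08x\n",
		       (unsigned long long)rates[i], addend);
	}
	return 0;
}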
@@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
+ /* Fall back to the main clock if no PTP ref clock is provided */
+ priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
+ if (IS_ERR(priv->clk_ptp_ref)) {
+ priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
+ priv->clk_ptp_ref = NULL;
+ } else {
+ clk_prepare_enable(priv->clk_ptp_ref);
+ priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
+ }
+
priv->adv_ts = 0;
if (priv->dma_cap.atime_stamp && priv->extend_desc)
priv->adv_ts = 1;
@@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
+ if (priv->clk_ptp_ref)
+ clk_disable_unprepare(priv->clk_ptp_ref);
stmmac_ptp_unregister(priv);
}
@@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev)
else
p = priv->dma_tx + i;
p->des2 = 0;
- priv->tx_skbuff_dma[i] = 0;
+ priv->tx_skbuff_dma[i].buf = 0;
+ priv->tx_skbuff_dma[i].map_as_page = false;
priv->tx_skbuff[i] = NULL;
}
@@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
else
p = priv->dma_tx + i;
- if (priv->tx_skbuff_dma[i]) {
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[i],
- priv->hw->desc->get_tx_len(p),
- DMA_TO_DEVICE);
- priv->tx_skbuff_dma[i] = 0;
+ if (priv->tx_skbuff_dma[i].buf) {
+ if (priv->tx_skbuff_dma[i].map_as_page)
+ dma_unmap_page(priv->device,
+ priv->tx_skbuff_dma[i].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[i].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
}
if (priv->tx_skbuff[i] != NULL) {
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
+ priv->tx_skbuff_dma[i].buf = 0;
+ priv->tx_skbuff_dma[i].map_as_page = false;
}
}
}
@@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
if (!priv->rx_skbuff)
goto err_rx_skbuff;
- priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+ priv->tx_skbuff_dma = kmalloc_array(txsize,
+ sizeof(*priv->tx_skbuff_dma),
GFP_KERNEL);
if (!priv->tx_skbuff_dma)
goto err_tx_skbuff_dma;
@@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
pr_debug("%s: curr %d, dirty %d\n", __func__,
priv->cur_tx, priv->dirty_tx);
- if (likely(priv->tx_skbuff_dma[entry])) {
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[entry],
- priv->hw->desc->get_tx_len(p),
- DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = 0;
+ if (likely(priv->tx_skbuff_dma[entry].buf)) {
+ if (priv->tx_skbuff_dma[entry].map_as_page)
+ dma_unmap_page(priv->device,
+ priv->tx_skbuff_dma[entry].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[entry].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry].buf = 0;
+ priv->tx_skbuff_dma[entry].map_as_page = false;
}
priv->hw->mode->clean_desc3(priv, p);
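With buffers now flagged via map_as_page, both dma_free_tx_skbufs() above and stmmac_tx_clean() here repeat the same page-versus-single unmap decision. Purely to show that shared shape in one place; the helper name is invented and the patch keeps the logic open-coded:

static void stmmac_unmap_tx_buf(struct stmmac_priv *priv, int entry,
				struct dma_desc *p)
{
	if (!priv->tx_skbuff_dma[entry].buf)
		return;

	if (priv->tx_skbuff_dma[entry].map_as_page)
		dma_unmap_page(priv->device, priv->tx_skbuff_dma[entry].buf,
			       priv->hw->desc->get_tx_len(p), DMA_TO_DEVICE);
	else
		dma_unmap_single(priv->device, priv->tx_skbuff_dma[entry].buf,
				 priv->hw->desc->get_tx_len(p), DMA_TO_DEVICE);

	priv->tx_skbuff_dma[entry].buf = 0;
	priv->tx_skbuff_dma[entry].map_as_page = false;
}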
@@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev)
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
+ ret = priv->hw->mac->rx_ipc(priv->hw);
+ if (!ret) {
+ pr_warn(" RX IPC Checksum Offload disabled\n");
+ priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+ priv->hw->rx_csum = 0;
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_set_mac(priv->ioaddr, true);
@@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!is_jumbo)) {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ goto dma_map_err;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
csum_insertion, priv->mode);
} else {
desc = first;
entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+ if (unlikely(entry < 0))
+ goto dma_map_err;
}
for (i = 0; i < nfrags; i++) {
@@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ goto dma_map_err; /* should reuse desc w/o issues */
+
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].map_as_page = true;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
priv->mode);
wmb();
@@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_OK;
+dma_map_err:
+ dev_err(priv->device, "Tx dma map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
priv->rx_skbuff_dma[entry] =
dma_map_single(priv->device, skb->data, bfsize,
DMA_FROM_DEVICE);
-
+ if (dma_mapping_error(priv->device,
+ priv->rx_skbuff_dma[entry])) {
+ dev_err(priv->device, "Rx dma map failed\n");
+ dev_kfree_skb(skb);
+ break;
+ }
p->des2 = priv->rx_skbuff_dma[entry];
priv->hw->mode->refill_desc3(priv, p);
@@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
unsigned int entry = priv->cur_rx % rxsize;
unsigned int next_entry;
unsigned int count = 0;
- int coe = priv->plat->rx_coe;
+ int coe = priv->hw->rx_csum;
if (netif_msg_rx_status(priv)) {
pr_debug("%s: descriptor ring:\n", __func__);
@@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
features &= ~NETIF_F_RXCSUM;
- else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
- features &= ~NETIF_F_IPV6_CSUM;
+
if (!priv->plat->tx_coe)
features &= ~NETIF_F_ALL_CSUM;
@@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
return features;
}
+static int stmmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ /* Keep the COE Type if RX checksum offload is supported */
+ if (features & NETIF_F_RXCSUM)
+ priv->hw->rx_csum = priv->plat->rx_coe;
+ else
+ priv->hw->rx_csum = 0;
+ /* No check needed because rx_coe has been set before and it will be
+ * fixed if a problem is detected.
+ */
+ priv->hw->mac->rx_ipc(priv->hw);
+
+ return 0;
+}
+
/**
* stmmac_interrupt - main ISR
* @irq: interrupt number.
@@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_stop = stmmac_release,
.ndo_change_mtu = stmmac_change_mtu,
.ndo_fix_features = stmmac_fix_features,
+ .ndo_set_features = stmmac_set_features,
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
@@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
*/
static int stmmac_hw_init(struct stmmac_priv *priv)
{
- int ret;
struct mac_device_info *mac;
/* Identify the MAC HW device */
@@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
/* To use alternate (extended) or normal descriptor structures */
stmmac_selec_desc_mode(priv);
- ret = priv->hw->mac->rx_ipc(priv->hw);
- if (!ret) {
- pr_warn(" RX IPC Checksum Offload not configured.\n");
- priv->plat->rx_coe = STMMAC_RX_COE_NONE;
- }
-
- if (priv->plat->rx_coe)
+ if (priv->plat->rx_coe) {
+ priv->hw->rx_csum = priv->plat->rx_coe;
pr_info(" RX Checksum Offload Engine supported (type %d)\n",
priv->plat->rx_coe);
+ }
if (priv->plat->tx_coe)
pr_info(" TX Checksum insertion supported\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b7ad3565566c..c5ee79d8a8c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
{
if (priv->ptp_clock) {
ptp_clock_unregister(priv->ptp_clock);
+ priv->ptp_clock = NULL;
pr_debug("Removed PTP HW clock successfully on %s\n",
priv->dev->name);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 3dbc047622fa..4535df37c227 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -25,8 +25,6 @@
#ifndef __STMMAC_PTP_H__
#define __STMMAC_PTP_H__
-#define STMMAC_SYSCLOCK 62500000
-
/* IEEE 1588 PTP register offsets */
#define PTP_TCR 0x0700 /* Timestamp Control Reg */
#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
diff --git a/drivers/net/fddi/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h
index c1ba26c06d73..3de2f0d15fe2 100644
--- a/drivers/net/fddi/skfp/h/skfbi.h
+++ b/drivers/net/fddi/skfp/h/skfbi.h
@@ -147,11 +147,6 @@
#define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */
#define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */
-/* PCI_BASE_2ND 32 bit 2nd Base address */
-#define PCI_IOBASE 0xffffff00L /* Bit 31..8: I/O Base address */
-#define PCI_IOSIZE 0x000000fcL /* Bit 7..2: I/O Size Requirements */
-#define PCI_IOSPACE 0x00000001L /* Bit 0: I/O Space Indicator */
-
/* PCI_SUB_VID 16 bit Subsystem Vendor ID */
/* PCI_SUB_ID 16 bit Subsystem ID */
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c94e2a27446a..a854d38c231d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1036,31 +1036,31 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
/* First check if the EEE ability is supported */
eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
MDIO_MMD_PCS, phydev->addr);
- if (eee_cap < 0)
- return eee_cap;
+ if (eee_cap <= 0)
+ goto eee_exit_err;
cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
if (!cap)
- return -EPROTONOSUPPORT;
+ goto eee_exit_err;
/* Check which link settings negotiated and verify it in
* the EEE advertising registers.
*/
eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
MDIO_MMD_AN, phydev->addr);
- if (eee_lp < 0)
- return eee_lp;
+ if (eee_lp <= 0)
+ goto eee_exit_err;
eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN, phydev->addr);
- if (eee_adv < 0)
- return eee_adv;
+ if (eee_adv <= 0)
+ goto eee_exit_err;
adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
idx = phy_find_setting(phydev->speed, phydev->duplex);
if (!(lp & adv & settings[idx].setting))
- return -EPROTONOSUPPORT;
+ goto eee_exit_err;
if (clk_stop_enable) {
/* Configure the PHY to stop receiving xMII
@@ -1080,7 +1080,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
return 0; /* EEE supported */
}
-
+eee_exit_err:
return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d6e90c72c257..6dfcbf523936 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2056,7 +2056,6 @@ vmxnet3_set_mc(struct net_device *netdev)
if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
- new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
netdev_mc_count(netdev) * ETH_ALEN);
new_table_pa = dma_map_single(
@@ -2064,15 +2063,18 @@ vmxnet3_set_mc(struct net_device *netdev)
new_table,
rxConf->mfTableLen,
PCI_DMA_TODEVICE);
+ }
+
+ if (new_table_pa) {
+ new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTablePA = cpu_to_le64(new_table_pa);
} else {
- netdev_info(netdev, "failed to copy mcast list"
- ", setting ALL_MULTI\n");
+ netdev_info(netdev,
+ "failed to copy mcast list, setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
}
}
-
if (!(new_mode & VMXNET3_RXM_MCAST)) {
rxConf->mfTableLen = 0;
rxConf->mfTablePA = 0;
@@ -2091,11 +2093,10 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- if (new_table) {
+ if (new_table_pa)
dma_unmap_single(&adapter->pdev->dev, new_table_pa,
rxConf->mfTableLen, PCI_DMA_TODEVICE);
- kfree(new_table);
- }
+ kfree(new_table);
}
void
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 29ee77f2c97f..3759479f959a 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01020000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01020100
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1fb7b37d1402..beb377b2d4b7 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1327,7 +1327,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
} else if (vxlan->flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
- .sa.sa_family = AF_INET,
+ .sin.sin_family = AF_INET,
};
vxlan_ip_miss(dev, &ipa);
@@ -1488,7 +1488,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
} else if (vxlan->flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
- .sa.sa_family = AF_INET6,
+ .sin6.sin6_family = AF_INET6,
};
vxlan_ip_miss(dev, &ipa);
@@ -1521,7 +1521,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = pip->daddr,
- .sa.sa_family = AF_INET,
+ .sin.sin_family = AF_INET,
};
vxlan_ip_miss(dev, &ipa);
@@ -1542,7 +1542,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin6.sin6_addr = pip6->daddr,
- .sa.sa_family = AF_INET6,
+ .sin6.sin6_family = AF_INET6,
};
vxlan_ip_miss(dev, &ipa);
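union vxlan_addr overlays struct sockaddr, sockaddr_in and sockaddr_in6, and the old initializers named two different union members at once; only the last designated initializer for the overlapping storage takes effect, so the address field was silently zeroed. A small standalone program that makes the effect visible; plain socket types stand in for vxlan_addr and the address value is arbitrary:

#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

union demo_addr {
	struct sockaddr sa;
	struct sockaddr_in sin;
};

int main(void)
{
	/* Two different members initialized: the later .sa initializer wins
	 * and the address set through .sin is lost (the bug fixed above). */
	union demo_addr bad = {
		.sin.sin_addr.s_addr = 0x0a000001,
		.sa.sa_family = AF_INET,
	};
	/* Everything set through one member: both fields survive. */
	union demo_addr good = {
		.sin.sin_addr.s_addr = 0x0a000001,
		.sin.sin_family = AF_INET,
	};

	printf("bad:  family=%d addr=0x%08x\n", bad.sa.sa_family,
	       (unsigned)bad.sin.sin_addr.s_addr);
	printf("good: family=%d addr=0x%08x\n", good.sa.sa_family,
	       (unsigned)good.sin.sin_addr.s_addr);
	return 0;
}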
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 334c2ece855a..da92bfa76b7c 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2423,8 +2423,6 @@ static void at76_delete_device(struct at76_priv *priv)
kfree_skb(priv->rx_skb);
- usb_put_dev(priv->udev);
-
at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw",
__func__);
ieee80211_free_hw(priv->hw);
@@ -2558,6 +2556,7 @@ static void at76_disconnect(struct usb_interface *interface)
wiphy_info(priv->hw->wiphy, "disconnecting\n");
at76_delete_device(priv);
+ usb_put_dev(priv->udev);
dev_info(&interface->dev, "disconnected\n");
}
diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/spectral.c
index 5fe29b9f8fa2..8f68426ca653 100644
--- a/drivers/net/wireless/ath/ath9k/spectral.c
+++ b/drivers/net/wireless/ath/ath9k/spectral.c
@@ -253,7 +253,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
if (strncmp("trigger", buf, 7) == 0) {
ath9k_spectral_scan_trigger(sc->hw);
- } else if (strncmp("background", buf, 9) == 0) {
+ } else if (strncmp("background", buf, 10) == 0) {
ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
} else if (strncmp("chanscan", buf, 8) == 0) {
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 6451d2b6abcf..824f5e287783 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -51,7 +51,6 @@ config IWLWIFI_LEDS
config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
- depends on m
default IWLWIFI
help
This is the driver that supports the DVM firmware which is
@@ -60,7 +59,6 @@ config IWLDVM
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
- depends on m
help
This is the driver that supports the MVM firmware which is
currently only available for 7260 and 3160 devices.
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 6dc5dd3ced44..ed50de6362ed 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
/* recalculate basic rates */
iwl_calc_basic_rates(priv, ctx);
+ /*
+ * force CTS-to-self frame protection if RTS-CTS is not the preferred
+ * aggregation protection method
+ */
+ if (!priv->hw_params.use_rts_for_aggregation)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
else
ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ if (bss_conf->use_cts_prot)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+
memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
if (vif->type == NL80211_IFTYPE_AP ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 48730064da73..d67a37a786aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,8 +67,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 9
-#define IWL3160_UCODE_API_MAX 9
+#define IWL7260_UCODE_API_MAX 10
+#define IWL3160_UCODE_API_MAX 10
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 9
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 44b19e015102..e93c6972290b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -67,7 +67,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 9
+#define IWL8000_UCODE_API_MAX 10
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 8
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
index 33da3dfcfa4f..d4bd550f505c 100644
--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -101,7 +101,7 @@ static bool halbtc_legacy(struct rtl_priv *adapter)
bool is_legacy = false;
- if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B))
+ if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
is_legacy = true;
return is_legacy;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 361435f8608a..1ac6383e7947 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
{RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index e29e15dca86e..f379689dde30 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -576,6 +576,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
init_waitqueue_head(&queue->dealloc_wq);
atomic_set(&queue->inflight_packets, 0);
+ netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+ XENVIF_NAPI_WEIGHT);
+
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
@@ -629,9 +632,6 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
wake_up_process(queue->task);
wake_up_process(queue->dealloc_task);
- netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
- XENVIF_NAPI_WEIGHT);
-
return 0;
err_rx_unbind:
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 9dd63b822025..e9bf2f47b61a 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -510,7 +510,7 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
WARN_ON(nt->mw[mw_num].virt_addr == NULL);
- if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
+ if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
num_qps_mw = nt->max_qps / mw_max + 1;
else
num_qps_mw = nt->max_qps / mw_max;
@@ -576,6 +576,19 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
return -ENOMEM;
}
+ /*
+ * we must ensure that the memory address allocated is BAR size
+ * aligned in order for the XLAT register to take the value. This
+ * is a requirement of the hardware. It is recommended to set up CMA
+ * for BAR sizes equal to or greater than 4MB.
+ */
+ if (!IS_ALIGNED(mw->dma_addr, mw->size)) {
+ dev_err(&pdev->dev, "DMA memory %pad not aligned to BAR size\n",
+ &mw->dma_addr);
+ ntb_free_mw(nt, num_mw);
+ return -ENOMEM;
+ }
+
/* Notify HW the memory location of the receive buffer */
ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
@@ -856,7 +869,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
qp->client_ready = NTB_LINK_DOWN;
qp->event_handler = NULL;
- if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
+ if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
num_qps_mw = nt->max_qps / mw_max + 1;
else
num_qps_mw = nt->max_qps / mw_max;
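PCI BAR sizes are powers of two, so the new IS_ALIGNED(mw->dma_addr, mw->size) check reduces to a mask test on the low bits of the DMA address. A standalone illustration with a hypothetical 4MB BAR and made-up addresses; the kernel macro differs only in casting the alignment to typeof(x):

#include <stdio.h>
#include <stdint.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t bar_size = 4ULL << 20;		/* 4MB */
	uint64_t addrs[] = { 0x80000000ULL, 0x80200000ULL, 0x80280000ULL };

	for (size_t i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("0x%010llx %s aligned to the BAR size\n",
		       (unsigned long long)addrs[i],
		       IS_ALIGNED(addrs[i], bar_size) ? "is" : "is NOT");
	return 0;
}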
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 9eae9834bcc7..a0580afe1713 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -913,7 +913,7 @@ static int __init dino_probe(struct parisc_device *dev)
printk("%s version %s found at 0x%lx\n", name, version, hpa);
if (!request_mem_region(hpa, PAGE_SIZE, name)) {
- printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n",
+ printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%lx)!\n",
hpa);
return 1;
}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 0dd742719154..4ff8cbb620d3 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -41,9 +41,9 @@ config PHY_MVEBU_SATA
config PHY_MIPHY365X
tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series"
depends on ARCH_STI
- depends on GENERIC_PHY
depends on HAS_IOMEM
depends on OF
+ select GENERIC_PHY
help
Enable this to support the miphy transceiver (for SATA/PCIE)
that is part of STMicroelectronics STiH41x SoC series.
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c
index b05302b09c9f..392101c8d6b0 100644
--- a/drivers/phy/phy-exynos5-usbdrd.c
+++ b/drivers/phy/phy-exynos5-usbdrd.c
@@ -542,6 +542,7 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, exynos5_usbdrd_phy_of_match);
static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
{
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index e1a6623d4696..9cd33a4bcfb1 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -34,6 +34,7 @@
#include <linux/delay.h>
#include <linux/usb/otg.h>
#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
#include <linux/usb/musb-omap.h>
#include <linux/usb/ulpi.h>
#include <linux/i2c/twl.h>
@@ -422,37 +423,55 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
}
}
-static int twl4030_phy_power_off(struct phy *phy)
+static int twl4030_usb_runtime_suspend(struct device *dev)
{
- struct twl4030_usb *twl = phy_get_drvdata(phy);
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
+ dev_dbg(twl->dev, "%s\n", __func__);
if (twl->asleep)
return 0;
twl4030_phy_power(twl, 0);
twl->asleep = 1;
- dev_dbg(twl->dev, "%s\n", __func__);
+
return 0;
}
-static void __twl4030_phy_power_on(struct twl4030_usb *twl)
+static int twl4030_usb_runtime_resume(struct device *dev)
{
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+ dev_dbg(twl->dev, "%s\n", __func__);
+ if (!twl->asleep)
+ return 0;
+
twl4030_phy_power(twl, 1);
- twl4030_i2c_access(twl, 1);
- twl4030_usb_set_mode(twl, twl->usb_mode);
- if (twl->usb_mode == T2_USB_MODE_ULPI)
- twl4030_i2c_access(twl, 0);
+ twl->asleep = 0;
+
+ return 0;
+}
+
+static int twl4030_phy_power_off(struct phy *phy)
+{
+ struct twl4030_usb *twl = phy_get_drvdata(phy);
+
+ dev_dbg(twl->dev, "%s\n", __func__);
+ pm_runtime_mark_last_busy(twl->dev);
+ pm_runtime_put_autosuspend(twl->dev);
+
+ return 0;
}
static int twl4030_phy_power_on(struct phy *phy)
{
struct twl4030_usb *twl = phy_get_drvdata(phy);
- if (!twl->asleep)
- return 0;
- __twl4030_phy_power_on(twl);
- twl->asleep = 0;
dev_dbg(twl->dev, "%s\n", __func__);
+ pm_runtime_get_sync(twl->dev);
+ twl4030_i2c_access(twl, 1);
+ twl4030_usb_set_mode(twl, twl->usb_mode);
+ if (twl->usb_mode == T2_USB_MODE_ULPI)
+ twl4030_i2c_access(twl, 0);
/*
* XXX When VBUS gets driven after musb goes to A mode,
@@ -558,9 +577,27 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
* USB_LINK_VBUS state. musb_hdrc won't care until it
* starts to handle softconnect right.
*/
+ if ((status == OMAP_MUSB_VBUS_VALID) ||
+ (status == OMAP_MUSB_ID_GROUND)) {
+ if (twl->asleep)
+ pm_runtime_get_sync(twl->dev);
+ } else {
+ if (!twl->asleep) {
+ pm_runtime_mark_last_busy(twl->dev);
+ pm_runtime_put_autosuspend(twl->dev);
+ }
+ }
omap_musb_mailbox(status);
}
- sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+
+ /* don't schedule during sleep - irq works right then */
+ if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
+ cancel_delayed_work(&twl->id_workaround_work);
+ schedule_delayed_work(&twl->id_workaround_work, HZ);
+ }
+
+ if (irq)
+ sysfs_notify(&twl->dev->kobj, NULL, "vbus");
return IRQ_HANDLED;
}
@@ -569,29 +606,8 @@ static void twl4030_id_workaround_work(struct work_struct *work)
{
struct twl4030_usb *twl = container_of(work, struct twl4030_usb,
id_workaround_work.work);
- enum omap_musb_vbus_id_status status;
- bool status_changed = false;
-
- status = twl4030_usb_linkstat(twl);
-
- spin_lock_irq(&twl->lock);
- if (status >= 0 && status != twl->linkstat) {
- twl->linkstat = status;
- status_changed = true;
- }
- spin_unlock_irq(&twl->lock);
-
- if (status_changed) {
- dev_dbg(twl->dev, "handle missing status change to %d\n",
- status);
- omap_musb_mailbox(status);
- }
- /* don't schedule during sleep - irq works right then */
- if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
- cancel_delayed_work(&twl->id_workaround_work);
- schedule_delayed_work(&twl->id_workaround_work, HZ);
- }
+ twl4030_usb_irq(0, twl);
}
static int twl4030_phy_init(struct phy *phy)
@@ -599,22 +615,17 @@ static int twl4030_phy_init(struct phy *phy)
struct twl4030_usb *twl = phy_get_drvdata(phy);
enum omap_musb_vbus_id_status status;
- /*
- * Start in sleep state, we'll get called through set_suspend()
- * callback when musb is runtime resumed and it's time to start.
- */
- __twl4030_phy_power(twl, 0);
- twl->asleep = 1;
-
+ pm_runtime_get_sync(twl->dev);
status = twl4030_usb_linkstat(twl);
twl->linkstat = status;
- if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) {
+ if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID)
omap_musb_mailbox(twl->linkstat);
- twl4030_phy_power_on(phy);
- }
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+ pm_runtime_mark_last_busy(twl->dev);
+ pm_runtime_put_autosuspend(twl->dev);
+
return 0;
}
@@ -650,6 +661,11 @@ static const struct phy_ops ops = {
.owner = THIS_MODULE,
};
+static const struct dev_pm_ops twl4030_usb_pm_ops = {
+ SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
+ twl4030_usb_runtime_resume, NULL)
+};
+
static int twl4030_usb_probe(struct platform_device *pdev)
{
struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev);
@@ -726,6 +742,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
/* Our job is to use irqs and status from the power module
* to keep the transceiver disabled when nothing's connected.
*
@@ -744,6 +765,9 @@ static int twl4030_usb_probe(struct platform_device *pdev)
return status;
}
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(twl->dev);
+
dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
return 0;
}
@@ -753,6 +777,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
struct twl4030_usb *twl = platform_get_drvdata(pdev);
int val;
+ pm_runtime_get_sync(twl->dev);
cancel_delayed_work(&twl->id_workaround_work);
device_remove_file(twl->dev, &dev_attr_vbus);
@@ -772,9 +797,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
/* disable complete OTG block */
twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
-
- if (!twl->asleep)
- twl4030_phy_power(twl, 0);
+ pm_runtime_mark_last_busy(twl->dev);
+ pm_runtime_put(twl->dev);
return 0;
}
@@ -792,6 +816,7 @@ static struct platform_driver twl4030_usb_driver = {
.remove = twl4030_usb_remove,
.driver = {
.name = "twl4030_usb",
+ .pm = &twl4030_usb_pm_ops,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(twl4030_usb_id_table),
},
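Taken together, the twl4030 changes are a standard runtime-PM autosuspend conversion: the suspend/resume callbacks wrap the old phy power switching, probe enables autosuspend with a 2 second delay, and each user brackets its work with pm_runtime_get_sync() and pm_runtime_put_autosuspend(). Condensed into one minimal skeleton; the foo_* names are placeholders and the device setup is elided:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* power the block down, as twl4030_usb_runtime_suspend() does */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* power it back up */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static int foo_probe(struct platform_device *pdev)
{
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);	/* 2 s, as above */
	pm_runtime_enable(&pdev->dev);

	pm_runtime_get_sync(&pdev->dev);	/* stay powered while probing */
	/* ... device setup ... */
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);	/* may suspend after the delay */

	return 0;
}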
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 9ca59a018743..e12e5b07f6d7 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -461,6 +461,7 @@ static struct irq_chip byt_irqchip = {
.irq_mask = byt_irq_mask,
.irq_unmask = byt_irq_unmask,
.irq_set_type = byt_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 2ead7e78c456..14ba80bfa571 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -77,7 +77,7 @@ EXPORT_SYMBOL_GPL(dasd_nofcx);
* strings when running as a module.
*/
static char *dasd[256];
-module_param_array(dasd, charp, NULL, 0);
+module_param_array(dasd, charp, NULL, S_IRUGO);
/*
* Single spinlock to protect devmap and servermap structures and lists.
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 97ef37b51068..e7646ce3d659 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -889,6 +889,7 @@ extern const struct attribute_group *qeth_generic_attr_groups[];
extern const struct attribute_group *qeth_osn_attr_groups[];
extern struct workqueue_struct *qeth_wq;
+int qeth_card_hw_is_reachable(struct qeth_card *);
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_realloc_buffer_pool(struct qeth_card *, int);
int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c0d6ba8655c7..fd22c811cbe1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -73,6 +73,13 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
struct workqueue_struct *qeth_wq;
EXPORT_SYMBOL_GPL(qeth_wq);
+int qeth_card_hw_is_reachable(struct qeth_card *card)
+{
+ return (card->state == CARD_STATE_SOFTSETUP) ||
+ (card->state == CARD_STATE_UP);
+}
+EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);
+
static void qeth_close_dev_handler(struct work_struct *work)
{
struct qeth_card *card;
@@ -5790,6 +5797,7 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
struct qeth_card *card = netdev->ml_priv;
enum qeth_link_types link_type;
struct carrier_info carrier_info;
+ int rc;
u32 speed;
if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
@@ -5832,8 +5840,14 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
/* Check if we can obtain more accurate information. */
/* If QUERY_CARD_INFO command is not supported or fails, */
/* just return the heuristics that was filled above. */
- if (qeth_query_card_info(card, &carrier_info) != 0)
+ if (!qeth_card_hw_is_reachable(card))
+ return -ENODEV;
+ rc = qeth_query_card_info(card, &carrier_info);
+ if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
return 0;
+ if (rc) /* report error from the hardware operation */
+ return rc;
+ /* on success, fill in the information got from the hardware */
netdev_dbg(netdev,
"card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index ae1bc04b8653..59e3aa538b4d 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -5,17 +5,12 @@
#include <linux/slab.h>
#include <asm/ebcdic.h>
+#include "qeth_core.h"
#include "qeth_l2.h"
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
-static int qeth_card_hw_is_reachable(struct qeth_card *card)
-{
- return (card->state == CARD_STATE_SOFTSETUP) ||
- (card->state == CARD_STATE_UP);
-}
-
static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
struct device_attribute *attr, char *buf,
int show_state)
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index 19396dc4ee47..bed2fedeb057 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -38,6 +38,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4351) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index e7b2e0234196..69139ce7420d 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -199,7 +199,6 @@ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
fence->num_fences = 1;
atomic_set(&fence->status, 1);
- fence_get(&pt->base);
fence->cbs[0].sync_pt = &pt->base;
fence->cbs[0].fence = fence;
if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c
index 7e3f019d7e72..4662e00b456a 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/staging/imx-drm/imx-ldb.c
@@ -574,6 +574,9 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
for (i = 0; i < 2; i++) {
struct imx_ldb_channel *channel = &imx_ldb->channel[i];
+ if (!channel->connector.funcs)
+ continue;
+
channel->connector.funcs->destroy(&channel->connector);
channel->encoder.funcs->destroy(&channel->encoder);
}
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
index 6f393a11f44d..50de10a550e9 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/staging/imx-drm/ipuv3-plane.c
@@ -281,7 +281,8 @@ static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode)
ipu_idmac_put(ipu_plane->ipu_ch);
ipu_dmfc_put(ipu_plane->dmfc);
- ipu_dp_put(ipu_plane->dp);
+ if (ipu_plane->dp)
+ ipu_dp_put(ipu_plane->dp);
}
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 0367f5a2cfe4..0c59e26c0805 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -568,7 +568,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if (sb->s_root == NULL) {
CERROR("%s: can't make root dentry\n",
ll_get_fsname(sb, NULL, 0));
- GOTO(out_root, err = -ENOMEM);
+ GOTO(out_lock_cn_cb, err = -ENOMEM);
}
sbi->ll_sdev_orig = sb->s_dev;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 4db7987ec225..57d9df84ce5d 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -540,6 +540,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "INT3434", 0 },
{ "INT3435", 0 },
{ "80860F0A", 0 },
+ { "8086228A", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 7b63677475c1..d7d4584549a5 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -527,6 +527,45 @@ static void atmel_enable_ms(struct uart_port *port)
}
/*
+ * Disable modem status interrupts
+ */
+static void atmel_disable_ms(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ uint32_t idr = 0;
+
+ /*
+ * Interrupt should not be disabled twice
+ */
+ if (!atmel_port->ms_irq_enabled)
+ return;
+
+ atmel_port->ms_irq_enabled = false;
+
+ if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0)
+ disable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]);
+ else
+ idr |= ATMEL_US_CTSIC;
+
+ if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0)
+ disable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]);
+ else
+ idr |= ATMEL_US_DSRIC;
+
+ if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0)
+ disable_irq(atmel_port->gpio_irq[UART_GPIO_RI]);
+ else
+ idr |= ATMEL_US_RIIC;
+
+ if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0)
+ disable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]);
+ else
+ idr |= ATMEL_US_DCDIC;
+
+ UART_PUT_IDR(port, idr);
+}
+
+/*
* Control the transmission of a break signal
*/
static void atmel_break_ctl(struct uart_port *port, int break_state)
@@ -1993,7 +2032,9 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
/* CTS flow-control and modem-status interrupts */
if (UART_ENABLE_MS(port, termios->c_cflag))
- port->ops->enable_ms(port);
+ atmel_enable_ms(port);
+ else
+ atmel_disable_ms(port);
spin_unlock_irqrestore(&port->lock, flags);
}
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 01951d27cc03..806e4bcadbd7 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -581,7 +581,7 @@ static unsigned int cdns_uart_tx_empty(struct uart_port *port)
{
unsigned int status;
- status = cdns_uart_readl(CDNS_UART_ISR_OFFSET) & CDNS_UART_IXR_TXEMPTY;
+ status = cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_TXEMPTY;
return status ? TIOCSER_TEMT : 0;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index d72b9d2de2c5..4935ac38fd00 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -20,13 +20,13 @@
static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
{
struct device *dev = ci->gadget.dev.parent;
- int val;
switch (event) {
case CI_HDRC_CONTROLLER_RESET_EVENT:
dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
writel(0, USB_AHBBURST);
writel(0, USB_AHBMODE);
+ usb_phy_init(ci->transceiver);
break;
case CI_HDRC_CONTROLLER_STOPPED_EVENT:
dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
@@ -34,10 +34,7 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
* Put the transceiver in non-driving mode. Otherwise host
* may not detect soft-disconnection.
*/
- val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
- usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL);
+ usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN);
break;
default:
dev_dbg(dev, "unknown ci_hdrc event\n");
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 46f5161c7891..d481c99a20d7 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5024,9 +5024,10 @@ static void hub_events(void)
hub = list_entry(tmp, struct usb_hub, event_list);
kref_get(&hub->kref);
+ hdev = hub->hdev;
+ usb_get_dev(hdev);
spin_unlock_irq(&hub_event_lock);
- hdev = hub->hdev;
hub_dev = hub->intfdev;
intf = to_usb_interface(hub_dev);
dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
@@ -5139,6 +5140,7 @@ static void hub_events(void)
usb_autopm_put_interface(intf);
loop_disconnected:
usb_unlock_device(hdev);
+ usb_put_dev(hdev);
kref_put(&hub->kref, hub_release);
} /* end while (1) */
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 7c9618e916e2..ce6071d65d51 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1649,6 +1649,7 @@ static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
dev_err(hsotg->dev,
"%s: timeout flushing fifo (GRSTCTL=%08x)\n",
__func__, val);
+ break;
}
udelay(1);
@@ -2747,13 +2748,14 @@ static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
- if (hsotg->phy) {
- phy_init(hsotg->phy);
- phy_power_on(hsotg->phy);
- } else if (hsotg->uphy)
+ if (hsotg->uphy)
usb_phy_init(hsotg->uphy);
- else if (hsotg->plat->phy_init)
+ else if (hsotg->plat && hsotg->plat->phy_init)
hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
+ else {
+ phy_init(hsotg->phy);
+ phy_power_on(hsotg->phy);
+ }
}
/**
@@ -2767,13 +2769,14 @@ static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
- if (hsotg->phy) {
- phy_power_off(hsotg->phy);
- phy_exit(hsotg->phy);
- } else if (hsotg->uphy)
+ if (hsotg->uphy)
usb_phy_shutdown(hsotg->uphy);
- else if (hsotg->plat->phy_exit)
+ else if (hsotg->plat && hsotg->plat->phy_exit)
hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
+ else {
+ phy_power_off(hsotg->phy);
+ phy_exit(hsotg->phy);
+ }
}
/**
@@ -2892,13 +2895,11 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
return -ENODEV;
/* all endpoints should be shutdown */
- for (ep = 0; ep < hsotg->num_of_eps; ep++)
+ for (ep = 1; ep < hsotg->num_of_eps; ep++)
s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
spin_lock_irqsave(&hsotg->lock, flags);
- s3c_hsotg_phy_disable(hsotg);
-
if (!driver)
hsotg->driver = NULL;
@@ -2941,7 +2942,6 @@ static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
s3c_hsotg_phy_enable(hsotg);
s3c_hsotg_core_init(hsotg);
} else {
- s3c_hsotg_disconnect(hsotg);
s3c_hsotg_phy_disable(hsotg);
}
@@ -3441,13 +3441,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
hsotg->irq = ret;
- ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
- dev_name(dev), hsotg);
- if (ret < 0) {
- dev_err(dev, "cannot claim IRQ\n");
- goto err_clk;
- }
-
dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
hsotg->gadget.max_speed = USB_SPEED_HIGH;
@@ -3488,9 +3481,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
if (hsotg->phy && (phy_get_bus_width(phy) == 8))
hsotg->phyif = GUSBCFG_PHYIF8;
- if (hsotg->phy)
- phy_init(hsotg->phy);
-
/* usb phy enable */
s3c_hsotg_phy_enable(hsotg);
@@ -3498,6 +3488,17 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
s3c_hsotg_init(hsotg);
s3c_hsotg_hw_cfg(hsotg);
+ ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
+ dev_name(dev), hsotg);
+ if (ret < 0) {
+ s3c_hsotg_phy_disable(hsotg);
+ clk_disable_unprepare(hsotg->clk);
+ regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
+ hsotg->supplies);
+ dev_err(dev, "cannot claim IRQ\n");
+ goto err_clk;
+ }
+
/* hsotg->num_of_eps holds number of EPs other than ep0 */
if (hsotg->num_of_eps == 0) {
@@ -3582,9 +3583,6 @@ static int s3c_hsotg_remove(struct platform_device *pdev)
usb_gadget_unregister_driver(hsotg->driver);
}
- s3c_hsotg_phy_disable(hsotg);
- if (hsotg->phy)
- phy_exit(hsotg->phy);
clk_disable_unprepare(hsotg->clk);
return 0;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f2050e844b4b..b0f4d52b7f04 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -796,20 +796,21 @@ static int dwc3_remove(struct platform_device *pdev)
{
struct dwc3 *dwc = platform_get_drvdata(pdev);
+ dwc3_debugfs_exit(dwc);
+ dwc3_core_exit_mode(dwc);
+ dwc3_event_buffers_cleanup(dwc);
+ dwc3_free_event_buffers(dwc);
+
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
phy_power_off(dwc->usb2_generic_phy);
phy_power_off(dwc->usb3_generic_phy);
+ dwc3_core_exit(dwc);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- dwc3_debugfs_exit(dwc);
- dwc3_core_exit_mode(dwc);
- dwc3_event_buffers_cleanup(dwc);
- dwc3_free_event_buffers(dwc);
- dwc3_core_exit(dwc);
-
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 3ef5e4fc315b..2f537d588225 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -574,9 +574,9 @@ static int dwc3_omap_remove(struct platform_device *pdev)
if (omap->extcon_id_dev.edev)
extcon_unregister_interest(&omap->extcon_id_dev);
dwc3_omap_disable_irqs(omap);
+ device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
return 0;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f2dbaca5df2c..0fcc0a35ae05 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -437,7 +437,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
dep->stream_capable = true;
}
- if (usb_endpoint_xfer_isoc(desc))
+ if (!usb_endpoint_xfer_control(desc))
params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
/*
@@ -1137,9 +1137,11 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
int ret;
+ spin_lock_irqsave(&dwc->lock, flags);
if (!dep->endpoint.desc) {
dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
request, ep->name);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -ESHUTDOWN;
}
@@ -1147,7 +1149,6 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
request, ep->name, request->length);
trace_dwc3_ep_queue(req);
- spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_queue(dep, req);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1952,12 +1953,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dwc3_endpoint_transfer_complete(dwc, dep, event);
break;
case DWC3_DEPEVT_XFERINPROGRESS:
- if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
- dep->name);
- return;
- }
-
dwc3_endpoint_transfer_complete(dwc, dep, event);
break;
case DWC3_DEPEVT_XFERNOTREADY:
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index a86b0a231a8c..e4afe8df5553 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -155,6 +155,12 @@ struct ffs_io_data {
struct usb_request *req;
};
+struct ffs_desc_helper {
+ struct ffs_data *ffs;
+ unsigned interfaces_count;
+ unsigned eps_count;
+};
+
static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
@@ -1853,7 +1859,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
u8 *valuep, struct usb_descriptor_header *desc,
void *priv)
{
- struct ffs_data *ffs = priv;
+ struct ffs_desc_helper *helper = priv;
+ struct usb_endpoint_descriptor *d;
ENTER();
@@ -1867,8 +1874,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
* encountered interface "n" then there are at least
* "n+1" interfaces.
*/
- if (*valuep >= ffs->interfaces_count)
- ffs->interfaces_count = *valuep + 1;
+ if (*valuep >= helper->interfaces_count)
+ helper->interfaces_count = *valuep + 1;
break;
case FFS_STRING:
@@ -1876,14 +1883,22 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
* Strings are indexed from 1 (0 is magic ;) reserved
* for languages list or some such)
*/
- if (*valuep > ffs->strings_count)
- ffs->strings_count = *valuep;
+ if (*valuep > helper->ffs->strings_count)
+ helper->ffs->strings_count = *valuep;
break;
case FFS_ENDPOINT:
- /* Endpoints are indexed from 1 as well. */
- if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
- ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
+ d = (void *)desc;
+ helper->eps_count++;
+ if (helper->eps_count >= 15)
+ return -EINVAL;
+ /* Check if descriptors for any speed were already parsed */
+ if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
+ helper->ffs->eps_addrmap[helper->eps_count] =
+ d->bEndpointAddress;
+ else if (helper->ffs->eps_addrmap[helper->eps_count] !=
+ d->bEndpointAddress)
+ return -EINVAL;
break;
}
@@ -2076,6 +2091,7 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
char *data = _data, *raw_descs;
unsigned os_descs_count = 0, counts[3], flags;
int ret = -EINVAL, i;
+ struct ffs_desc_helper helper;
ENTER();
@@ -2124,13 +2140,29 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
/* Read descriptors */
raw_descs = data;
+ helper.ffs = ffs;
for (i = 0; i < 3; ++i) {
if (!counts[i])
continue;
+ helper.interfaces_count = 0;
+ helper.eps_count = 0;
ret = ffs_do_descs(counts[i], data, len,
- __ffs_data_do_entity, ffs);
+ __ffs_data_do_entity, &helper);
if (ret < 0)
goto error;
+ if (!ffs->eps_count && !ffs->interfaces_count) {
+ ffs->eps_count = helper.eps_count;
+ ffs->interfaces_count = helper.interfaces_count;
+ } else {
+ if (ffs->eps_count != helper.eps_count) {
+ ret = -EINVAL;
+ goto error;
+ }
+ if (ffs->interfaces_count != helper.interfaces_count) {
+ ret = -EINVAL;
+ goto error;
+ }
+ }
data += ret;
len -= ret;
}
@@ -2366,9 +2398,18 @@ static void ffs_event_add(struct ffs_data *ffs,
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
}
-
/* Bind/unbind USB function hooks *******************************************/
+static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
+{
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
+ if (ffs->eps_addrmap[i] == endpoint_address)
+ return i;
+ return -ENOENT;
+}
+
static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
struct usb_descriptor_header *desc,
void *priv)
@@ -2403,7 +2444,10 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
return 0;
- idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
+ idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
+ if (idx < 0)
+ return idx;
+
ffs_ep = func->eps + idx;
if (unlikely(ffs_ep->descs[ep_desc_id])) {
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index 63d6e71569c1..d48897e8ffeb 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -224,6 +224,8 @@ struct ffs_data {
void *ms_os_descs_ext_prop_name_avail;
void *ms_os_descs_ext_prop_data_avail;
+ u8 eps_addrmap[15];
+
unsigned short strings_count;
unsigned short interfaces_count;
unsigned short eps_count;
diff --git a/drivers/usb/gadget/udc/fusb300_udc.h b/drivers/usb/gadget/udc/fusb300_udc.h
index ae811d8d38b4..ad39f892d200 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.h
+++ b/drivers/usb/gadget/udc/fusb300_udc.h
@@ -12,7 +12,7 @@
#ifndef __FUSB300_UDC_H__
-#define __FUSB300_UDC_H_
+#define __FUSB300_UDC_H__
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index f4eac113690e..2e95715b50c0 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -3320,7 +3320,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
if (stat & tmp) {
writel(tmp, &dev->regs->irqstat1);
if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
- (readl(&dev->usb->usbstat) & mask)) ||
+ ((readl(&dev->usb->usbstat) & mask) == 0)) ||
((readl(&dev->usb->usbctl) &
BIT(VBUS_PIN)) == 0)) &&
(dev->gadget.speed != USB_SPEED_UNKNOWN)) {
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index aa79e8749040..69aece31143a 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -468,7 +468,8 @@ static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg)
}
/* Updates Link Status for super Speed port */
-static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
+static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
+ u32 *status, u32 status_reg)
{
u32 pls = status_reg & PORT_PLS_MASK;
@@ -507,7 +508,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
* in which sometimes the port enters compliance mode
* caused by a delay on the host-device negotiation.
*/
- if (pls == USB_SS_PORT_LS_COMP_MOD)
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+ (pls == USB_SS_PORT_LS_COMP_MOD))
pls |= USB_PORT_STAT_CONNECTION;
}
@@ -666,7 +668,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
}
/* Update Port Link State */
if (hcd->speed == HCD_USB3) {
- xhci_hub_report_usb3_link_state(&status, raw_port_status);
+ xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status);
/*
* Verify if all USB3 Ports Have entered U0 already.
* Delete Compliance Mode Timer if so.
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 8056d90690ee..8936211b161d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1812,6 +1812,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
+ xhci->lpm_command = NULL;
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
@@ -1819,7 +1820,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_cleanup_command_queue(xhci);
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
- for (i = 0; i < num_ports; i++) {
+ for (i = 0; i < num_ports && xhci->rh_bw; i++) {
struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
struct list_head *ep = &bwt->interval_bw[j].endpoints;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c020b094fe7d..c4a8fca8ae93 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3971,13 +3971,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
int ret;
spin_lock_irqsave(&xhci->lock, flags);
- if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
+
+ virt_dev = xhci->devs[udev->slot_id];
+
+ /*
+ * virt_dev might not exist yet if the xHC resumed from hibernate (S4)
+ * and was re-initialized. Exit latency will be set later, after
+ * hub_port_finish_reset() is done and xhci->devs[] is re-allocated
+ */
+
+ if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
/* Attempt to issue an Evaluate Context command to change the MEL. */
- virt_dev = xhci->devs[udev->slot_id];
command = xhci->lpm_command;
ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
if (!ctrl_ctx) {
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index ecdd6328aafb..acdfb3e68a90 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -39,6 +39,7 @@ struct cppi41_dma_channel {
u32 transferred;
u32 packet_sz;
struct list_head tx_check;
+ int tx_zlp;
};
#define MUSB_DMA_NUM_CHANNELS 15
@@ -122,6 +123,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
struct musb *musb = hw_ep->musb;
+ void __iomem *epio = hw_ep->regs;
+ u16 csr;
if (!cppi41_channel->prog_len ||
(cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {
@@ -131,15 +134,24 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
cppi41_channel->transferred;
cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
cppi41_channel->channel.rx_packet_done = true;
+
+ /*
+ * Transmit a ZLP using PIO mode for transfers whose size is a
+ * multiple of the EP packet size.
+ */
+ if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
+ cppi41_channel->packet_sz) == 0) {
+ musb_ep_select(musb->mregs, hw_ep->epnum);
+ csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
} else {
/* next iteration, reload */
struct dma_chan *dc = cppi41_channel->dc;
struct dma_async_tx_descriptor *dma_desc;
enum dma_transfer_direction direction;
- u16 csr;
u32 remain_bytes;
- void __iomem *epio = cppi41_channel->hw_ep->regs;
cppi41_channel->buf_addr += cppi41_channel->packet_sz;
@@ -363,6 +375,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
cppi41_channel->total_len = len;
cppi41_channel->transferred = 0;
cppi41_channel->packet_sz = packet_sz;
+ cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;
/*
* Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 8c2f23b75d6d..0e0c41587a08 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2012-2014 Freescale Semiconductor, Inc.
* Copyright (C) 2012 Marek Vasut <marex@denx.de>
* on behalf of DENX Software Engineering GmbH
*
@@ -130,7 +130,13 @@ static const struct mxs_phy_data vf610_phy_data = {
MXS_PHY_NEED_IP_FIX,
};
+static const struct mxs_phy_data imx6sx_phy_data = {
+ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+ MXS_PHY_NEED_IP_FIX,
+};
+
static const struct of_device_id mxs_phy_dt_ids[] = {
+ { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, },
{ .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
{ .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
{ .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 13b4fa287da8..886f1807a67b 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -878,8 +878,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
return -ENOMEM;
}
- tegra_phy->config = devm_kzalloc(&pdev->dev,
- sizeof(*tegra_phy->config), GFP_KERNEL);
+ tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config),
+ GFP_KERNEL);
if (!tegra_phy->config) {
dev_err(&pdev->dev,
"unable to allocate memory for USB UTMIP config\n");
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 4fd36530bfa3..b0c97a3f1bfe 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -108,19 +108,45 @@ static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
return list_first_entry(&pipe->list, struct usbhs_pkt, node);
}
+static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
+ struct usbhs_fifo *fifo);
+static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
+ struct usbhs_fifo *fifo);
+static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
+ struct usbhs_pkt *pkt);
+#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
+#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
+static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
unsigned long flags;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
+ usbhs_pipe_disable(pipe);
+
if (!pkt)
pkt = __usbhsf_pkt_get(pipe);
- if (pkt)
+ if (pkt) {
+ struct dma_chan *chan = NULL;
+
+ if (fifo)
+ chan = usbhsf_dma_chan_get(fifo, pkt);
+ if (chan) {
+ dmaengine_terminate_all(chan);
+ usbhsf_fifo_clear(pipe, fifo);
+ usbhsf_dma_unmap(pkt);
+ }
+
__usbhsf_pkt_del(pkt);
+ }
+
+ if (fifo)
+ usbhsf_fifo_unselect(pipe, fifo);
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
@@ -544,6 +570,7 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
usbhsf_send_terminator(pipe, fifo);
usbhsf_tx_irq_ctrl(pipe, !*is_done);
+ usbhs_pipe_running(pipe, !*is_done);
usbhs_pipe_enable(pipe);
dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
@@ -570,12 +597,21 @@ usbhs_fifo_write_busy:
* retry in interrupt
*/
usbhsf_tx_irq_ctrl(pipe, 1);
+ usbhs_pipe_running(pipe, 1);
return ret;
}
+static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+{
+ if (usbhs_pipe_is_running(pkt->pipe))
+ return 0;
+
+ return usbhsf_pio_try_push(pkt, is_done);
+}
+
struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
- .prepare = usbhsf_pio_try_push,
+ .prepare = usbhsf_pio_prepare_push,
.try_run = usbhsf_pio_try_push,
};
@@ -589,6 +625,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
if (usbhs_pipe_is_busy(pipe))
return 0;
+ if (usbhs_pipe_is_running(pipe))
+ return 0;
+
/*
* pipe enable to prepare packet receive
*/
@@ -597,6 +636,7 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
usbhs_pipe_enable(pipe);
+ usbhs_pipe_running(pipe, 1);
usbhsf_rx_irq_ctrl(pipe, 1);
return 0;
@@ -642,6 +682,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
(total_len < maxp)) { /* short packet */
*is_done = 1;
usbhsf_rx_irq_ctrl(pipe, 0);
+ usbhs_pipe_running(pipe, 0);
usbhs_pipe_disable(pipe); /* disable pipe first */
}
@@ -763,8 +804,6 @@ static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}
-#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
-#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
struct usbhs_pipe *pipe = pkt->pipe;
@@ -805,6 +844,7 @@ static void xfer_work(struct work_struct *work)
dev_dbg(dev, " %s %d (%d/ %d)\n",
fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
+ usbhs_pipe_running(pipe, 1);
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
usbhs_pipe_enable(pipe);
usbhsf_dma_start(pipe, fifo);
@@ -836,6 +876,10 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
goto usbhsf_pio_prepare_push;
+ /* return at this time if the pipe is running */
+ if (usbhs_pipe_is_running(pipe))
+ return 0;
+
/* get enable DMA fifo */
fifo = usbhsf_get_dma_fifo(priv, pkt);
if (!fifo)
@@ -869,15 +913,29 @@ usbhsf_pio_prepare_push:
static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
+ int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);
+
+ pkt->actual += pkt->trans;
- pkt->actual = pkt->trans;
+ if (pkt->actual < pkt->length)
+ *is_done = 0; /* there is remaining data */
+ else if (is_short)
+ *is_done = 1; /* short packet */
+ else
+ *is_done = !pkt->zero; /* send zero packet? */
- *is_done = !pkt->zero; /* send zero packet ? */
+ usbhs_pipe_running(pipe, !*is_done);
usbhsf_dma_stop(pipe, pipe->fifo);
usbhsf_dma_unmap(pkt);
usbhsf_fifo_unselect(pipe, pipe->fifo);
+ if (!*is_done) {
+ /* change handler to PIO */
+ pkt->handler = &usbhs_fifo_pio_push_handler;
+ return pkt->handler->try_run(pkt, is_done);
+ }
+
return 0;
}
@@ -972,8 +1030,10 @@ static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
if ((pkt->actual == pkt->length) || /* receive all data */
(pkt->trans < maxp)) { /* short packet */
*is_done = 1;
+ usbhs_pipe_running(pipe, 0);
} else {
/* re-enable */
+ usbhs_pipe_running(pipe, 0);
usbhsf_prepare_pop(pkt, is_done);
}
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 6a030b931a3b..9a705b15b3a1 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -213,7 +213,10 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
{
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
u16 intenb0, intenb1;
+ unsigned long flags;
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
state->intsts0 = usbhs_read(priv, INTSTS0);
state->intsts1 = usbhs_read(priv, INTSTS1);
@@ -229,6 +232,8 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
state->bempsts &= mod->irq_bempsts;
state->brdysts &= mod->irq_brdysts;
}
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
/*
* Check whether the irq enable registers and the irq status are set
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index 75fbcf6b102e..040bcefcb040 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -578,6 +578,19 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
return usbhsp_flags_has(pipe, IS_DIR_HOST);
}
+int usbhs_pipe_is_running(struct usbhs_pipe *pipe)
+{
+ return usbhsp_flags_has(pipe, IS_RUNNING);
+}
+
+void usbhs_pipe_running(struct usbhs_pipe *pipe, int running)
+{
+ if (running)
+ usbhsp_flags_set(pipe, IS_RUNNING);
+ else
+ usbhsp_flags_clr(pipe, IS_RUNNING);
+}
+
void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
{
u16 mask = (SQCLR | SQSET);
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 406f36d050e4..d24a05972370 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -36,6 +36,7 @@ struct usbhs_pipe {
#define USBHS_PIPE_FLAGS_IS_USED (1 << 0)
#define USBHS_PIPE_FLAGS_IS_DIR_IN (1 << 1)
#define USBHS_PIPE_FLAGS_IS_DIR_HOST (1 << 2)
+#define USBHS_PIPE_FLAGS_IS_RUNNING (1 << 3)
struct usbhs_pkt_handle *handler;
@@ -80,6 +81,9 @@ int usbhs_pipe_probe(struct usbhs_priv *priv);
void usbhs_pipe_remove(struct usbhs_priv *priv);
int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe);
int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe);
+int usbhs_pipe_is_running(struct usbhs_pipe *pipe);
+void usbhs_pipe_running(struct usbhs_pipe *pipe, int running);
+
void usbhs_pipe_init(struct usbhs_priv *priv,
int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map));
int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 824ea5e7ec8b..dc72b924c399 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -728,6 +728,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+ { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
@@ -939,6 +940,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
/* Infineon Devices */
{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ /* GE Healthcare devices */
+ { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 70b0b1d88ae9..5937b2d242f2 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -837,6 +837,12 @@
#define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */
/*
+ * NOVITUS printers
+ */
+#define NOVITUS_VID 0x1a28
+#define NOVITUS_BONO_E_PID 0x6010
+
+/*
* RT Systems programming cables for various ham radios
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
@@ -1385,3 +1391,9 @@
* ekey biometric systems GmbH (http://ekey.net/)
*/
#define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */
+
+/*
+ * GE Healthcare devices
+ */
+#define GE_HEALTHCARE_VID 0x1901
+#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 6f7f01eb556a..46179a0828eb 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -282,14 +282,19 @@ static const struct usb_device_id id_table[] = {
/* Sierra Wireless HSPA Non-Composite Device */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
{ USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
- { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
+ /* Sierra Wireless Direct IP modems */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF),
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
/* AT&T Direct IP LTE modems */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
- { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
+ /* Airprime/Sierra Wireless Direct IP modems */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index 1a132e9e947a..c9bb107d5e5c 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -272,6 +272,14 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
}
static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x19d2, 0xffec) },
+ { USB_DEVICE(0x19d2, 0xffee) },
+ { USB_DEVICE(0x19d2, 0xfff6) },
+ { USB_DEVICE(0x19d2, 0xfff7) },
+ { USB_DEVICE(0x19d2, 0xfff8) },
+ { USB_DEVICE(0x19d2, 0xfff9) },
+ { USB_DEVICE(0x19d2, 0xfffb) },
+ { USB_DEVICE(0x19d2, 0xfffc) },
/* MG880 */
{ USB_DEVICE(0x19d2, 0xfffd) },
{ },
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 503ac5c8d80f..8a6f371ed6e7 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -59,10 +59,6 @@ static int uas_use_uas_driver(struct usb_interface *intf,
unsigned long flags = id->driver_info;
int r, alt;
- usb_stor_adjust_quirks(udev, &flags);
-
- if (flags & US_FL_IGNORE_UAS)
- return 0;
alt = uas_find_uas_alt_setting(intf);
if (alt < 0)
@@ -72,6 +68,29 @@ static int uas_use_uas_driver(struct usb_interface *intf,
if (r < 0)
return 0;
+ /*
+ * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is
+ * broken on the ASM1051, so use the number of streams to differentiate.
+ * Newer ASM1053s also support 32 streams, but have a different prod-id.
+ */
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c &&
+ le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) {
+ if (udev->speed < USB_SPEED_SUPER) {
+ /* No streams info, assume ASM1051 */
+ flags |= US_FL_IGNORE_UAS;
+ } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
+ flags |= US_FL_IGNORE_UAS;
+ }
+ }
+
+ usb_stor_adjust_quirks(udev, &flags);
+
+ if (flags & US_FL_IGNORE_UAS) {
+ dev_warn(&udev->dev,
+ "UAS is blacklisted for this device, using usb-storage instead\n");
+ return 0;
+ }
+
if (udev->bus->sg_tablesize == 0) {
dev_warn(&udev->dev,
"The driver for the USB controller %s does not support scatter-gather which is\n",
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 7ef99b2f3aaf..60cfcbc78552 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -741,6 +741,12 @@ UNUSUAL_DEV( 0x059b, 0x0001, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SINGLE_LUN ),
+UNUSUAL_DEV( 0x059b, 0x0040, 0x0100, 0x0100,
+ "Iomega",
+ "Jaz USB Adapter",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_SINGLE_LUN ),
+
/* Reported by <Hendryk.Pfeiffer@gmx.de> */
UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
"LaCie",
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
index 80079b8fed15..d0303f0dbe15 100644
--- a/drivers/uwb/lc-dev.c
+++ b/drivers/uwb/lc-dev.c
@@ -431,16 +431,19 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
uwb_dev->mac_addr = *bce->mac_addr;
uwb_dev->dev_addr = bce->dev_addr;
dev_set_name(&uwb_dev->dev, "%s", macbuf);
+
+ /* plug the beacon cache */
+ bce->uwb_dev = uwb_dev;
+ uwb_dev->bce = bce;
+ uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
+
result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
if (result < 0) {
dev_err(dev, "new device %s: cannot instantiate device\n",
macbuf);
goto error_dev_add;
}
- /* plug the beacon cache */
- bce->uwb_dev = uwb_dev;
- uwb_dev->bce = bce;
- uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
+
dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
dev_name(rc->uwb_dev.dev.parent));
@@ -448,6 +451,8 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
return;
error_dev_add:
+ bce->uwb_dev = NULL;
+ uwb_bce_put(bce);
kfree(uwb_dev);
return;
}
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index a7b6217ac87b..6ad23bd3523a 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -639,9 +639,7 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
if (g0 != panels[i].g0)
continue;
if (r0 == panels[i].r0 && b0 == panels[i].b0)
- fb->panel->caps = panels[i].caps & CLCD_CAP_RGB;
- if (r0 == panels[i].b0 && b0 == panels[i].r0)
- fb->panel->caps = panels[i].caps & CLCD_CAP_BGR;
+ fb->panel->caps = panels[i].caps;
}
return fb->panel->caps ? 0 : -EINVAL;
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 5c660c77f03b..1e0a317d3dcd 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -230,8 +230,8 @@ static enum bp_state reserve_additional_memory(long credit)
rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
if (rc) {
- pr_info("%s: add_memory() failed: %i\n", __func__, rc);
- return BP_EAGAIN;
+ pr_warn("Cannot add additional memory (%i)\n", rc);
+ return BP_ECANCELED;
}
balloon_hotplug -= credit;
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 787d17945418..e53fe191738c 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -124,7 +124,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
int i, rc, readonly;
LIST_HEAD(queue_gref);
LIST_HEAD(queue_file);
- struct gntalloc_gref *gref;
+ struct gntalloc_gref *gref, *next;
readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
rc = -ENOMEM;
@@ -141,13 +141,11 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
goto undo;
/* Grant foreign access to the page. */
- gref->gref_id = gnttab_grant_foreign_access(op->domid,
+ rc = gnttab_grant_foreign_access(op->domid,
pfn_to_mfn(page_to_pfn(gref->page)), readonly);
- if ((int)gref->gref_id < 0) {
- rc = gref->gref_id;
+ if (rc < 0)
goto undo;
- }
- gref_ids[i] = gref->gref_id;
+ gref_ids[i] = gref->gref_id = rc;
}
/* Add to gref lists. */
@@ -162,8 +160,8 @@ undo:
mutex_lock(&gref_mutex);
gref_size -= (op->count - i);
- list_for_each_entry(gref, &queue_file, next_file) {
- /* __del_gref does not remove from queue_file */
+ list_for_each_entry_safe(gref, next, &queue_file, next_file) {
+ list_del(&gref->next_file);
__del_gref(gref);
}
@@ -193,7 +191,7 @@ static void __del_gref(struct gntalloc_gref *gref)
gref->notify.flags = 0;
- if (gref->gref_id > 0) {
+ if (gref->gref_id) {
if (gnttab_query_foreign_access(gref->gref_id))
return;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 5f1e1f3cd186..f8bb36f9d9ce 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -103,16 +103,11 @@ static void do_suspend(void)
shutting_down = SHUTDOWN_SUSPEND;
-#ifdef CONFIG_PREEMPT
- /* If the kernel is preemptible, we need to freeze all the processes
- to prevent them from being in the middle of a pagetable update
- during suspend. */
err = freeze_processes();
if (err) {
pr_err("%s: freeze failed %d\n", __func__, err);
goto out;
}
-#endif
err = dpm_suspend_start(PMSG_FREEZE);
if (err) {
@@ -157,10 +152,8 @@ out_resume:
dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
out_thaw:
-#ifdef CONFIG_PREEMPT
thaw_processes();
out:
-#endif
shutting_down = SHUTDOWN_INVALID;
}
#endif /* CONFIG_HIBERNATE_CALLBACKS */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 36861b7a6757..ff1cc0399b9a 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1966,7 +1966,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
btrfs_init_log_ctx(&ctx);
- ret = btrfs_log_dentry_safe(trans, root, dentry, &ctx);
+ ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
if (ret < 0) {
/* Fallthrough and commit/free transaction. */
ret = 1;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9c194bd74d6e..016c403bfe7e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -778,8 +778,12 @@ retry:
ins.offset,
BTRFS_ORDERED_COMPRESSED,
async_extent->compress_type);
- if (ret)
+ if (ret) {
+ btrfs_drop_extent_cache(inode, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1, 0);
goto out_free_reserve;
+ }
/*
* clear dirty, set writeback and unlock the pages.
@@ -971,14 +975,14 @@ static noinline int cow_file_range(struct inode *inode,
ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
ram_size, cur_alloc_size, 0);
if (ret)
- goto out_reserve;
+ goto out_drop_extent_cache;
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_reloc_clone_csums(inode, start,
cur_alloc_size);
if (ret)
- goto out_reserve;
+ goto out_drop_extent_cache;
}
if (disk_num_bytes < cur_alloc_size)
@@ -1006,6 +1010,8 @@ static noinline int cow_file_range(struct inode *inode,
out:
return ret;
+out_drop_extent_cache:
+ btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
@@ -4242,7 +4248,8 @@ out:
btrfs_abort_transaction(trans, root, ret);
}
error:
- if (last_size != (u64)-1)
+ if (last_size != (u64)-1 &&
+ root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
btrfs_ordered_update_i_size(inode, last_size, NULL);
btrfs_free_path(path);
return err;
@@ -5627,6 +5634,17 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index)
return ret;
}
+static int btrfs_insert_inode_locked(struct inode *inode)
+{
+ struct btrfs_iget_args args;
+ args.location = &BTRFS_I(inode)->location;
+ args.root = BTRFS_I(inode)->root;
+
+ return insert_inode_locked4(inode,
+ btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
+ btrfs_find_actor, &args);
+}
+
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir,
@@ -5719,10 +5737,19 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
sizes[1] = name_len + sizeof(*ref);
}
+ location = &BTRFS_I(inode)->location;
+ location->objectid = objectid;
+ location->offset = 0;
+ btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
+
+ ret = btrfs_insert_inode_locked(inode);
+ if (ret < 0)
+ goto fail;
+
path->leave_spinning = 1;
ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
if (ret != 0)
- goto fail;
+ goto fail_unlock;
inode_init_owner(inode, dir, mode);
inode_set_bytes(inode, 0);
@@ -5745,11 +5772,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
- location = &BTRFS_I(inode)->location;
- location->objectid = objectid;
- location->offset = 0;
- btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
-
btrfs_inherit_iflags(inode, dir);
if (S_ISREG(mode)) {
@@ -5760,7 +5782,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
BTRFS_INODE_NODATASUM;
}
- btrfs_insert_inode_hash(inode);
inode_tree_add(inode);
trace_btrfs_inode_new(inode);
@@ -5775,6 +5796,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
btrfs_ino(inode), root->root_key.objectid, ret);
return inode;
+
+fail_unlock:
+ unlock_new_inode(inode);
fail:
if (dir && name)
BTRFS_I(dir)->index_cnt--;
@@ -5909,28 +5933,28 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
- err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
- if (err) {
- drop_inode = 1;
- goto out_unlock;
- }
-
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
-
inode->i_op = &btrfs_special_inode_operations;
- err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
+ init_special_inode(inode, inode->i_mode, rdev);
+
+ err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- drop_inode = 1;
- else {
- init_special_inode(inode, inode->i_mode, rdev);
+ goto out_unlock_inode;
+
+ err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
+ if (err) {
+ goto out_unlock_inode;
+ } else {
btrfs_update_inode(trans, root, inode);
+ unlock_new_inode(inode);
d_instantiate(dentry, inode);
}
+
out_unlock:
btrfs_end_transaction(trans, root);
btrfs_balance_delayed_items(root);
@@ -5940,6 +5964,12 @@ out_unlock:
iput(inode);
}
return err;
+
+out_unlock_inode:
+ drop_inode = 1;
+ unlock_new_inode(inode);
+ goto out_unlock;
+
}
static int btrfs_create(struct inode *dir, struct dentry *dentry,
@@ -5974,15 +6004,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
drop_inode_on_err = 1;
-
- err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
- if (err)
- goto out_unlock;
-
- err = btrfs_update_inode(trans, root, inode);
- if (err)
- goto out_unlock;
-
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
@@ -5991,14 +6012,23 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
+ inode->i_mapping->a_ops = &btrfs_aops;
+ inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+
+ err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
+ if (err)
+ goto out_unlock_inode;
+
+ err = btrfs_update_inode(trans, root, inode);
+ if (err)
+ goto out_unlock_inode;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
- goto out_unlock;
+ goto out_unlock_inode;
- inode->i_mapping->a_ops = &btrfs_aops;
- inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+ unlock_new_inode(inode);
d_instantiate(dentry, inode);
out_unlock:
@@ -6010,6 +6040,11 @@ out_unlock:
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
return err;
+
+out_unlock_inode:
+ unlock_new_inode(inode);
+ goto out_unlock;
+
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
@@ -6117,25 +6152,30 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
}
drop_on_err = 1;
+ /* these must be set before we unlock the inode */
+ inode->i_op = &btrfs_dir_inode_operations;
+ inode->i_fop = &btrfs_dir_file_operations;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_fail;
-
- inode->i_op = &btrfs_dir_inode_operations;
- inode->i_fop = &btrfs_dir_file_operations;
+ goto out_fail_inode;
btrfs_i_size_write(inode, 0);
err = btrfs_update_inode(trans, root, inode);
if (err)
- goto out_fail;
+ goto out_fail_inode;
err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
dentry->d_name.len, 0, index);
if (err)
- goto out_fail;
+ goto out_fail_inode;
d_instantiate(dentry, inode);
+ /*
+ * mkdir is special. We're unlocking after we call d_instantiate
+ * to avoid a race with nfsd calling d_instantiate.
+ */
+ unlock_new_inode(inode);
drop_on_err = 0;
out_fail:
@@ -6145,6 +6185,10 @@ out_fail:
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
return err;
+
+out_fail_inode:
+ unlock_new_inode(inode);
+ goto out_fail;
}
/* helper for btfs_get_extent. Given an existing extent in the tree,
@@ -8100,6 +8144,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
set_nlink(inode, 1);
btrfs_i_size_write(inode, 0);
+ unlock_new_inode(inode);
err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
if (err)
@@ -8760,12 +8805,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
- err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
- if (err) {
- drop_inode = 1;
- goto out_unlock;
- }
-
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
@@ -8774,23 +8813,22 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
+ inode->i_mapping->a_ops = &btrfs_aops;
+ inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+ BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+
+ err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
+ if (err)
+ goto out_unlock_inode;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
- drop_inode = 1;
- else {
- inode->i_mapping->a_ops = &btrfs_aops;
- inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
- BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
- }
- if (drop_inode)
- goto out_unlock;
+ goto out_unlock_inode;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
- drop_inode = 1;
- goto out_unlock;
+ goto out_unlock_inode;
}
key.objectid = btrfs_ino(inode);
key.offset = 0;
@@ -8799,9 +8837,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
err = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (err) {
- drop_inode = 1;
btrfs_free_path(path);
- goto out_unlock;
+ goto out_unlock_inode;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
@@ -8825,12 +8862,15 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
inode_set_bytes(inode, name_len);
btrfs_i_size_write(inode, name_len);
err = btrfs_update_inode(trans, root, inode);
- if (err)
+ if (err) {
drop_inode = 1;
+ goto out_unlock_inode;
+ }
+
+ unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
out_unlock:
- if (!err)
- d_instantiate(dentry, inode);
btrfs_end_transaction(trans, root);
if (drop_inode) {
inode_dec_link_count(inode);
@@ -8838,6 +8878,11 @@ out_unlock:
}
btrfs_btree_balance_dirty(root);
return err;
+
+out_unlock_inode:
+ drop_inode = 1;
+ unlock_new_inode(inode);
+ goto out_unlock;
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -9021,14 +9066,6 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out;
}
- ret = btrfs_init_inode_security(trans, inode, dir, NULL);
- if (ret)
- goto out;
-
- ret = btrfs_update_inode(trans, root, inode);
- if (ret)
- goto out;
-
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
@@ -9036,9 +9073,16 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+ ret = btrfs_init_inode_security(trans, inode, dir, NULL);
+ if (ret)
+ goto out_inode;
+
+ ret = btrfs_update_inode(trans, root, inode);
+ if (ret)
+ goto out_inode;
ret = btrfs_orphan_add(trans, inode);
if (ret)
- goto out;
+ goto out_inode;
/*
* We set number of links to 0 in btrfs_new_inode(), and here we set
@@ -9048,6 +9092,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
* d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
*/
set_nlink(inode, 1);
+ unlock_new_inode(inode);
d_tmpfile(dentry, inode);
mark_inode_dirty(inode);
@@ -9057,8 +9102,12 @@ out:
iput(inode);
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
-
return ret;
+
+out_inode:
+ unlock_new_inode(inode);
+ goto out;
+
}
static const struct inode_operations btrfs_dir_inode_operations = {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index fce6fd0e3f50..8a8e29878c34 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1019,8 +1019,10 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
return false;
next = defrag_lookup_extent(inode, em->start + em->len);
- if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE ||
- (em->block_start + em->block_len == next->block_start))
+ if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+ ret = false;
+ else if ((em->block_start + em->block_len == next->block_start) &&
+ (em->block_len > 128 * 1024 && next->block_len > 128 * 1024))
ret = false;
free_extent_map(next);
@@ -1055,7 +1057,6 @@ static int should_defrag_range(struct inode *inode, u64 start, int thresh,
}
next_mergeable = defrag_check_next_extent(inode, em);
-
/*
* we hit a real extent, if it is big or the next extent is not a
* real extent, don't bother defragging it
@@ -1702,7 +1703,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
BTRFS_SUBVOL_QGROUP_INHERIT)) {
ret = -EOPNOTSUPP;
- goto out;
+ goto free_args;
}
if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
@@ -1712,27 +1713,31 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
if (vol_args->size > PAGE_CACHE_SIZE) {
ret = -EINVAL;
- goto out;
+ goto free_args;
}
inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
if (IS_ERR(inherit)) {
ret = PTR_ERR(inherit);
- goto out;
+ goto free_args;
}
}
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
vol_args->fd, subvol, ptr,
readonly, inherit);
+ if (ret)
+ goto free_inherit;
- if (ret == 0 && ptr &&
- copy_to_user(arg +
- offsetof(struct btrfs_ioctl_vol_args_v2,
- transid), ptr, sizeof(*ptr)))
+ if (ptr && copy_to_user(arg +
+ offsetof(struct btrfs_ioctl_vol_args_v2,
+ transid),
+ ptr, sizeof(*ptr)))
ret = -EFAULT;
-out:
- kfree(vol_args);
+
+free_inherit:
kfree(inherit);
+free_args:
+ kfree(vol_args);
return ret;
}
@@ -2652,7 +2657,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
- goto out;
+ goto err_drop;
}
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
@@ -2670,6 +2675,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
out:
kfree(vol_args);
+err_drop:
mnt_drop_write_file(file);
return ret;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 7e0e6e3029dd..d296efe2d3e7 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -94,8 +94,10 @@
#define LOG_WALK_REPLAY_ALL 3
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- int inode_only);
+ struct btrfs_root *root, struct inode *inode,
+ int inode_only,
+ const loff_t start,
+ const loff_t end);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 objectid);
@@ -3858,8 +3860,10 @@ process:
* This handles both files and directories.
*/
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- int inode_only)
+ struct btrfs_root *root, struct inode *inode,
+ int inode_only,
+ const loff_t start,
+ const loff_t end)
{
struct btrfs_path *path;
struct btrfs_path *dst_path;
@@ -3876,6 +3880,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
int ins_nr;
bool fast_search = false;
u64 ino = btrfs_ino(inode);
+ struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
path = btrfs_alloc_path();
if (!path)
@@ -4049,13 +4054,35 @@ log_extents:
goto out_unlock;
}
} else if (inode_only == LOG_INODE_ALL) {
- struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em, *n;
- write_lock(&tree->lock);
- list_for_each_entry_safe(em, n, &tree->modified_extents, list)
- list_del_init(&em->list);
- write_unlock(&tree->lock);
+ write_lock(&em_tree->lock);
+ /*
+ * We can't just remove every em if we're called for a ranged
+ * fsync - that is, one that doesn't cover the whole possible
+ * file range (0 to LLONG_MAX). This is because we can have
+ * em's that fall outside the range we're logging and therefore
+ * their ordered operations haven't completed yet
+ * (btrfs_finish_ordered_io() not invoked yet). This means we
+ * didn't get their respective file extent item in the fs/subvol
+ * tree yet, and need to let the next fast fsync (one which
+ * consults the list of modified extent maps) find the em so
+ * that it logs a matching file extent item and waits for the
+ * respective ordered operation to complete (if it's still
+ * running).
+ *
+ * Removing every em outside the range we're logging would make
+ * the next fast fsync not log their matching file extent items,
+ * therefore making us lose data after a log replay.
+ */
+ list_for_each_entry_safe(em, n, &em_tree->modified_extents,
+ list) {
+ const u64 mod_end = em->mod_start + em->mod_len - 1;
+
+ if (em->mod_start >= start && mod_end <= end)
+ list_del_init(&em->list);
+ }
+ write_unlock(&em_tree->lock);
}
if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
@@ -4065,8 +4092,19 @@ log_extents:
goto out_unlock;
}
}
- BTRFS_I(inode)->logged_trans = trans->transid;
- BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
+
+ write_lock(&em_tree->lock);
+ /*
+ * If we're doing a ranged fsync and there are still modified extents
+ * in the list, we must run again on the next fsync call, as it might
+ * cover those extents (a full fsync or an fsync for another range).
+ */
+ if (list_empty(&em_tree->modified_extents)) {
+ BTRFS_I(inode)->logged_trans = trans->transid;
+ BTRFS_I(inode)->last_log_commit =
+ BTRFS_I(inode)->last_sub_trans;
+ }
+ write_unlock(&em_tree->lock);
out_unlock:
if (unlikely(err))
btrfs_put_logged_extents(&logged_list);
@@ -4161,7 +4199,10 @@ out:
*/
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
- struct dentry *parent, int exists_only,
+ struct dentry *parent,
+ const loff_t start,
+ const loff_t end,
+ int exists_only,
struct btrfs_log_ctx *ctx)
{
int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
@@ -4207,7 +4248,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
if (ret)
goto end_no_trans;
- ret = btrfs_log_inode(trans, root, inode, inode_only);
+ ret = btrfs_log_inode(trans, root, inode, inode_only, start, end);
if (ret)
goto end_trans;
@@ -4235,7 +4276,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
if (BTRFS_I(inode)->generation >
root->fs_info->last_trans_committed) {
- ret = btrfs_log_inode(trans, root, inode, inode_only);
+ ret = btrfs_log_inode(trans, root, inode, inode_only,
+ 0, LLONG_MAX);
if (ret)
goto end_trans;
}
@@ -4269,13 +4311,15 @@ end_no_trans:
*/
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct dentry *dentry,
+ const loff_t start,
+ const loff_t end,
struct btrfs_log_ctx *ctx)
{
struct dentry *parent = dget_parent(dentry);
int ret;
ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
- 0, ctx);
+ start, end, 0, ctx);
dput(parent);
return ret;
@@ -4512,6 +4556,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
root->fs_info->last_trans_committed))
return 0;
- return btrfs_log_inode_parent(trans, root, inode, parent, 1, NULL);
+ return btrfs_log_inode_parent(trans, root, inode, parent, 0,
+ LLONG_MAX, 1, NULL);
}
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 7f5b41bd5373..e2e798ae7cd7 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -59,6 +59,8 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
int btrfs_recover_log_trees(struct btrfs_root *tree_root);
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct dentry *dentry,
+ const loff_t start,
+ const loff_t end,
struct btrfs_log_ctx *ctx);
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 603f18a65c12..a2172f3f69e3 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -22,6 +22,11 @@ config CIFS
support for OS/2 and Windows ME and similar servers is provided as
well.
+ The module also provides optional support for the follow-on
+ protocols for CIFS, including SMB3, which enables
+ useful performance and security features (see the description
+ of CONFIG_CIFS_SMB2).
+
The cifs module provides an advanced network file system
client for mounting to CIFS compliant servers. It includes
support for DFS (hierarchical name space), secure per-user
@@ -121,7 +126,8 @@ config CIFS_ACL
depends on CIFS_XATTR && KEYS
help
Allows fetching CIFS/NTFS ACL from the server. The DACL blob
- is handed over to the application/caller.
+ is handed over to the application/caller. See the man
+ page for getcifsacl for more information.
config CIFS_DEBUG
bool "Enable CIFS debugging routines"
@@ -162,7 +168,7 @@ config CIFS_NFSD_EXPORT
Allows NFS server to export a CIFS mounted share (nfsd over cifs)
config CIFS_SMB2
- bool "SMB2 network file system support"
+ bool "SMB2 and SMB3 network file system support"
depends on CIFS && INET
select NLS
select KEYS
@@ -170,16 +176,21 @@ config CIFS_SMB2
select DNS_RESOLVER
help
- This enables experimental support for the SMB2 (Server Message Block
- version 2) protocol. The SMB2 protocol is the successor to the
- popular CIFS and SMB network file sharing protocols. SMB2 is the
- native file sharing mechanism for recent versions of Windows
- operating systems (since Vista). SMB2 enablement will eventually
- allow users better performance, security and features, than would be
- possible with cifs. Note that smb2 mount options also are simpler
- (compared to cifs) due to protocol improvements.
-
- Unless you are a developer or tester, say N.
+ This enables support for the Server Message Block version 2
+ family of protocols, including SMB3. SMB3 support is
+ enabled on mount by specifying "vers=3.0" in the mount
+ options. These protocols are the successors to the popular
+ CIFS and SMB network file sharing protocols. SMB3 is the
+ native file sharing mechanism for recent versions of
+ Windows (Windows 8, Windows Server 2012 and later), and
+ Samba and many other servers support SMB3 well.
+ In general SMB3 enables better performance, security
+ and features than would be possible with CIFS (note that
+ when mounting to Samba, CIFS mounts can provide slightly
+ better POSIX compatibility than SMB3 mounts, due to the
+ CIFS POSIX extensions). SMB2/SMB3 mount options are also
+ slightly simpler (compared to CIFS) due to protocol
+ improvements.
config CIFS_FSCACHE
bool "Provide CIFS client caching support"
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index dfc731b02aa9..25b8392bfdd2 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -70,11 +70,6 @@
#define SERVER_NAME_LENGTH 40
#define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
-/* used to define string lengths for reversing unicode strings */
-/* (256+1)*2 = 514 */
-/* (max path length + 1 for null) * 2 for unicode */
-#define MAX_NAME 514
-
/* SMB echo "timeout" -- FIXME: tunable? */
#define SMB_ECHO_INTERVAL (60 * HZ)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 03ed8a09581c..8a9fded7c135 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -837,7 +837,6 @@ cifs_demultiplex_thread(void *p)
struct TCP_Server_Info *server = p;
unsigned int pdu_length;
char *buf = NULL;
- struct task_struct *task_to_wake = NULL;
struct mid_q_entry *mid_entry;
current->flags |= PF_MEMALLOC;
@@ -928,19 +927,7 @@ cifs_demultiplex_thread(void *p)
if (server->smallbuf) /* no sense logging a debug message if NULL */
cifs_small_buf_release(server->smallbuf);
- task_to_wake = xchg(&server->tsk, NULL);
clean_demultiplex_info(server);
-
- /* if server->tsk was NULL then wait for a signal before exiting */
- if (!task_to_wake) {
- set_current_state(TASK_INTERRUPTIBLE);
- while (!signal_pending(current)) {
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
- set_current_state(TASK_RUNNING);
- }
-
module_put_and_exit(0);
}
@@ -1600,6 +1587,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
tmp_end++;
if (!(tmp_end < end && tmp_end[1] == delim)) {
/* No it is not. Set the password to NULL */
+ kfree(vol->password);
vol->password = NULL;
break;
}
@@ -1637,6 +1625,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
options = end;
}
+ kfree(vol->password);
/* Now build new password string */
temp_len = strlen(value);
vol->password = kzalloc(temp_len+1, GFP_KERNEL);
@@ -2061,8 +2050,6 @@ cifs_find_tcp_session(struct smb_vol *vol)
static void
cifs_put_tcp_session(struct TCP_Server_Info *server)
{
- struct task_struct *task;
-
spin_lock(&cifs_tcp_ses_lock);
if (--server->srv_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
@@ -2086,10 +2073,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
-
- task = xchg(&server->tsk, NULL);
- if (task)
- force_sig(SIGKILL, task);
}
static struct TCP_Server_Info *
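The two kfree(vol->password) additions above plug a leak in the mount-option parser: when the password option is seen again (or reset to empty), the pointer was overwritten without freeing the previous allocation. A minimal user-space sketch of the same free-before-overwrite pattern; the names here are illustrative, not taken from cifs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct vol_opts {
	char *password;
};

/* Overwrite the stored password, freeing any earlier copy first. */
static int set_password(struct vol_opts *vol, const char *value)
{
	free(vol->password);		/* safe on NULL, like kfree() */
	vol->password = NULL;

	if (!value)			/* "set the password to NULL" case */
		return 0;

	vol->password = strdup(value);
	return vol->password ? 0 : -1;
}

int main(void)
{
	struct vol_opts vol = { .password = NULL };

	set_password(&vol, "first");	/* allocates */
	set_password(&vol, "second");	/* frees "first" before replacing it */
	set_password(&vol, NULL);	/* frees "second", leaves NULL */

	printf("password is %s\n", vol.password ? vol.password : "(null)");
	return 0;
}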
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 3db0c5fd9a11..6cbd9c688cfe 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -497,6 +497,14 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
goto out;
}
+ if (file->f_flags & O_DIRECT &&
+ CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+ if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ file->f_op = &cifs_file_direct_nobrl_ops;
+ else
+ file->f_op = &cifs_file_direct_ops;
+ }
+
file_info = cifs_new_fileinfo(&fid, file, tlink, oplock);
if (file_info == NULL) {
if (server->ops->close)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d5fec92e0360..7c018a1c52f7 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -467,6 +467,14 @@ int cifs_open(struct inode *inode, struct file *file)
cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
inode, file->f_flags, full_path);
+ if (file->f_flags & O_DIRECT &&
+ cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ file->f_op = &cifs_file_direct_nobrl_ops;
+ else
+ file->f_op = &cifs_file_direct_ops;
+ }
+
if (server->oplocks)
oplock = REQ_OPLOCK;
else
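The same O_DIRECT/strict-IO file_operations selection is now open-coded in both cifs_atomic_open() and cifs_open(). A sketch of how the duplicated block could be factored into one helper; the helper name cifs_select_fops_for_direct() is hypothetical, the snippet is not compiled against the kernel headers, and it uses only symbols that appear in the hunks above:

/* Hypothetical helper consolidating the two hunks above. */
static void cifs_select_fops_for_direct(struct cifs_sb_info *cifs_sb,
					struct file *file)
{
	if (!(file->f_flags & O_DIRECT) ||
	    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO))
		return;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		file->f_op = &cifs_file_direct_nobrl_ops;
	else
		file->f_op = &cifs_file_direct_ops;
}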
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 949ec909ec9a..7899a40465b3 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1720,7 +1720,10 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
unlink_target:
/* Try unlinking the target dentry if it's not negative */
if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
- tmprc = cifs_unlink(target_dir, target_dentry);
+ if (d_is_dir(target_dentry))
+ tmprc = cifs_rmdir(target_dir, target_dentry);
+ else
+ tmprc = cifs_unlink(target_dir, target_dentry);
if (tmprc)
goto cifs_rename_exit;
rc = cifs_do_rename(xid, source_dentry, from_name,
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 798c80a41c88..b334a89d6a66 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -596,8 +596,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
if (server->ops->dir_needs_close(cfile)) {
cfile->invalidHandle = true;
spin_unlock(&cifs_file_list_lock);
- if (server->ops->close)
- server->ops->close(xid, tcon, &cfile->fid);
+ if (server->ops->close_dir)
+ server->ops->close_dir(xid, tcon, &cfile->fid);
} else
spin_unlock(&cifs_file_list_lock);
if (cfile->srch_inf.ntwrk_buf_start) {
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 39ee32688eac..3a5e83317683 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -243,10 +243,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
kfree(ses->serverOS);
ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
- if (ses->serverOS)
+ if (ses->serverOS) {
strncpy(ses->serverOS, bcc_ptr, len);
- if (strncmp(ses->serverOS, "OS/2", 4) == 0)
- cifs_dbg(FYI, "OS/2 server\n");
+ if (strncmp(ses->serverOS, "OS/2", 4) == 0)
+ cifs_dbg(FYI, "OS/2 server\n");
+ }
bcc_ptr += len + 1;
bleft -= len + 1;
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 3f17b4550831..45992944e238 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
goto out;
}
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
GFP_KERNEL);
if (smb2_data == NULL) {
rc = -ENOMEM;
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 0150182a4494..899bbc86f73e 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
*adjust_tz = false;
*symlink = false;
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
GFP_KERNEL);
if (smb2_data == NULL)
return -ENOMEM;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 5a48aa290dfe..f522193b7184 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -389,7 +389,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc;
struct smb2_file_all_info *smb2_data;
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
GFP_KERNEL);
if (smb2_data == NULL)
return -ENOMEM;
@@ -1035,7 +1035,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
if (keep_size == false)
return -EOPNOTSUPP;
- /*
+ /*
* Must check if file sparse since fallocate -z (zero range) assumes
* non-sparse allocation
*/
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index fa0dd044213b..74b3a6684383 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -530,7 +530,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
struct smb2_sess_setup_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
- int resp_buftype;
+ int resp_buftype = CIFS_NO_BUFFER;
__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
struct TCP_Server_Info *server = ses->server;
u16 blob_length = 0;
@@ -1403,8 +1403,7 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
rsp = (struct smb2_close_rsp *)iov[0].iov_base;
if (rc != 0) {
- if (tcon)
- cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
+ cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
goto close_exit;
}
@@ -1533,7 +1532,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
{
return query_info(xid, tcon, persistent_fid, volatile_fid,
FILE_ALL_INFORMATION,
- sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+ sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
sizeof(struct smb2_file_all_info), data);
}
diff --git a/fs/dcache.c b/fs/dcache.c
index d30ce699ae4b..7a5b51440afa 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -106,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
unsigned int hash)
{
hash += (unsigned long) parent / L1_CACHE_BYTES;
- hash = hash + (hash >> d_hash_shift);
- return dentry_hashtable + (hash & d_hash_mask);
+ return dentry_hashtable + hash_32(hash, d_hash_shift);
}
/* Statistics gathering. */
@@ -2656,6 +2655,12 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
dentry->d_parent = dentry;
list_del_init(&dentry->d_u.d_child);
anon->d_parent = dparent;
+ if (likely(!d_unhashed(anon))) {
+ hlist_bl_lock(&anon->d_sb->s_anon);
+ __hlist_bl_del(&anon->d_hash);
+ anon->d_hash.pprev = NULL;
+ hlist_bl_unlock(&anon->d_sb->s_anon);
+ }
list_move(&anon->d_u.d_child, &dparent->d_subdirs);
write_seqcount_end(&dentry->d_seq);
@@ -2714,7 +2719,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
write_seqlock(&rename_lock);
__d_materialise_dentry(dentry, new);
write_sequnlock(&rename_lock);
- __d_drop(new);
_d_rehash(new);
spin_unlock(&new->d_lock);
spin_unlock(&inode->i_lock);
@@ -2778,7 +2782,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
* could splice into our tree? */
__d_materialise_dentry(dentry, alias);
write_sequnlock(&rename_lock);
- __d_drop(alias);
goto found;
} else {
/* Nope, but we must(!) avoid directory
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index b10b48c2a7af..7bcfff900f05 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1852,7 +1852,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
goto error_tgt_fput;
/* Check if EPOLLWAKEUP is allowed */
- ep_take_care_of_epollwakeup(&epds);
+ if (ep_op_has_event(op))
+ ep_take_care_of_epollwakeup(&epds);
/*
* We have to check that the file structure underneath the file descriptor
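The epoll_ctl() fix above only inspects the copied-in epoll_event when the operation actually supplies one; as I understand it, ep_op_has_event() is false only for EPOLL_CTL_DEL, where epds is never copied from user space and would otherwise be read uninitialized. A small user-space sketch of the same guard, with types and names simplified:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTL_ADD 1
#define CTL_DEL 2
#define CTL_MOD 3

struct event { uint32_t events; uint64_t data; };

/* Mirrors the idea of ep_op_has_event(): DEL carries no event payload. */
static bool op_has_event(int op)
{
	return op != CTL_DEL;
}

static void handle_ctl(int op, const struct event *uevent)
{
	struct event ev;	/* deliberately left uninitialized, like epds */

	if (op_has_event(op))
		memcpy(&ev, uevent, sizeof(ev));	/* the "copy_from_user" */

	/* Only look at ev.events behind the same guard that filled it in. */
	if (op_has_event(op) && (ev.events & 0x1))
		printf("op %d: event bit set\n", op);
	else
		printf("op %d: no event inspected\n", op);
}

int main(void)
{
	struct event ev = { .events = 0x1, .data = 42 };

	handle_ctl(CTL_ADD, &ev);
	handle_ctl(CTL_DEL, NULL);	/* must not read ev or dereference NULL */
	return 0;
}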
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 90a3cdca3f88..603e4ebbd0ac 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3240,6 +3240,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
retval = PTR_ERR(new.bh);
+ new.bh = NULL;
goto end_rename;
}
if (new.bh) {
@@ -3386,6 +3387,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
retval = PTR_ERR(new.bh);
+ new.bh = NULL;
goto end_rename;
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index bb0e80f03e2e..1e43b905ff98 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -575,6 +575,7 @@ handle_bb:
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
+ bh = NULL;
goto out;
}
overhead = ext4_group_overhead_blocks(sb, group);
@@ -603,6 +604,7 @@ handle_ib:
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
+ bh = NULL;
goto out;
}
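The ext4_rename(), ext4_cross_rename() and resize hunks above all add the same defensive step: once PTR_ERR() has been extracted, the buffer-head pointer is reset to NULL so that a shared error path which unconditionally releases it never sees an ERR_PTR-encoded value. A self-contained sketch of the idiom, with deliberately simplified ERR_PTR/IS_ERR/PTR_ERR stand-ins (the real helpers live in linux/err.h):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

#define EIO 5

static void *read_block(int fail)
{
	return fail ? ERR_PTR(-EIO) : malloc(16);
}

static void release(void *bh)
{
	free(bh);		/* like brelse(): fine on NULL, fatal on ERR_PTR */
}

static long do_op(int fail)
{
	void *bh = NULL;
	long err = 0;

	bh = read_block(fail);
	if (IS_ERR(bh)) {
		err = PTR_ERR(bh);
		bh = NULL;	/* the fix: never hand an ERR_PTR to release() */
		goto out;
	}
	/* ... use bh ... */
out:
	release(bh);
	return err;
}

int main(void)
{
	printf("ok path: %ld, error path: %ld\n", do_op(0), do_op(1));
	return 0;
}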
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 8f27c93f8d2e..ec9e082f9ecd 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -253,13 +253,11 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
error = make_socks(serv, net);
if (error < 0)
- goto err_socks;
+ goto err_bind;
set_grace_period(net);
dprintk("lockd_up_net: per-net data created; net=%p\n", net);
return 0;
-err_socks:
- svc_rpcb_cleanup(serv, net);
err_bind:
ln->nlmsvc_users--;
return error;
diff --git a/fs/namei.c b/fs/namei.c
index a996bb48dfab..215e44254c53 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -34,6 +34,7 @@
#include <linux/device_cgroup.h>
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
+#include <linux/hash.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -643,24 +644,22 @@ static int complete_walk(struct nameidata *nd)
static __always_inline void set_root(struct nameidata *nd)
{
- if (!nd->root.mnt)
- get_fs_root(current->fs, &nd->root);
+ get_fs_root(current->fs, &nd->root);
}
static int link_path_walk(const char *, struct nameidata *);
-static __always_inline void set_root_rcu(struct nameidata *nd)
+static __always_inline unsigned set_root_rcu(struct nameidata *nd)
{
- if (!nd->root.mnt) {
- struct fs_struct *fs = current->fs;
- unsigned seq;
+ struct fs_struct *fs = current->fs;
+ unsigned seq, res;
- do {
- seq = read_seqcount_begin(&fs->seq);
- nd->root = fs->root;
- nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
- } while (read_seqcount_retry(&fs->seq, seq));
- }
+ do {
+ seq = read_seqcount_begin(&fs->seq);
+ nd->root = fs->root;
+ res = __read_seqcount_begin(&nd->root.dentry->d_seq);
+ } while (read_seqcount_retry(&fs->seq, seq));
+ return res;
}
static void path_put_conditional(struct path *path, struct nameidata *nd)
@@ -860,7 +859,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
return PTR_ERR(s);
}
if (*s == '/') {
- set_root(nd);
+ if (!nd->root.mnt)
+ set_root(nd);
path_put(&nd->path);
nd->path = nd->root;
path_get(&nd->root);
@@ -1137,13 +1137,15 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
*/
*inode = path->dentry->d_inode;
}
- return read_seqretry(&mount_lock, nd->m_seq) &&
+ return !read_seqretry(&mount_lock, nd->m_seq) &&
!(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
}
static int follow_dotdot_rcu(struct nameidata *nd)
{
- set_root_rcu(nd);
+ struct inode *inode = nd->inode;
+ if (!nd->root.mnt)
+ set_root_rcu(nd);
while (1) {
if (nd->path.dentry == nd->root.dentry &&
@@ -1155,6 +1157,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
struct dentry *parent = old->d_parent;
unsigned seq;
+ inode = parent->d_inode;
seq = read_seqcount_begin(&parent->d_seq);
if (read_seqcount_retry(&old->d_seq, nd->seq))
goto failed;
@@ -1164,6 +1167,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
}
if (!follow_up_rcu(&nd->path))
break;
+ inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
while (d_mountpoint(nd->path.dentry)) {
@@ -1173,11 +1177,12 @@ static int follow_dotdot_rcu(struct nameidata *nd)
break;
nd->path.mnt = &mounted->mnt;
nd->path.dentry = mounted->mnt.mnt_root;
+ inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
- if (!read_seqretry(&mount_lock, nd->m_seq))
+ if (read_seqretry(&mount_lock, nd->m_seq))
goto failed;
}
- nd->inode = nd->path.dentry->d_inode;
+ nd->inode = inode;
return 0;
failed:
@@ -1256,7 +1261,8 @@ static void follow_mount(struct path *path)
static void follow_dotdot(struct nameidata *nd)
{
- set_root(nd);
+ if (!nd->root.mnt)
+ set_root(nd);
while(1) {
struct dentry *old = nd->path.dentry;
@@ -1634,8 +1640,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
static inline unsigned int fold_hash(unsigned long hash)
{
- hash += hash >> (8*sizeof(int));
- return hash;
+ return hash_64(hash, 32);
}
#else /* 32-bit case */
@@ -1669,13 +1674,14 @@ EXPORT_SYMBOL(full_name_hash);
/*
* Calculate the length and hash of the path component, and
- * return the length of the component;
+ * fill in the qstr. return the "len" as the result.
*/
-static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+static inline unsigned long hash_name(const char *name, struct qstr *res)
{
unsigned long a, b, adata, bdata, mask, hash, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+ res->name = name;
hash = a = 0;
len = -sizeof(unsigned long);
do {
@@ -1691,9 +1697,10 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
mask = create_zero_mask(adata | bdata);
hash += a & zero_bytemask(mask);
- *hashp = fold_hash(hash);
+ len += find_zero(mask);
+ res->hash_len = hashlen_create(fold_hash(hash), len);
- return len + find_zero(mask);
+ return len;
}
#else
@@ -1711,18 +1718,19 @@ EXPORT_SYMBOL(full_name_hash);
* We know there's a real path component here of at least
* one character.
*/
-static inline unsigned long hash_name(const char *name, unsigned int *hashp)
+static inline long hash_name(const char *name, struct qstr *res)
{
unsigned long hash = init_name_hash();
unsigned long len = 0, c;
+ res->name = name;
c = (unsigned char)*name;
do {
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
} while (c && c != '/');
- *hashp = end_name_hash(hash);
+ res->hash_len = hashlen_create(end_name_hash(hash), len);
return len;
}
@@ -1756,9 +1764,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
if (err)
break;
- len = hash_name(name, &this.hash);
- this.name = name;
- this.len = len;
+ len = hash_name(name, &this);
type = LAST_NORM;
if (name[0] == '.') switch (len) {
@@ -1852,7 +1858,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
if (*name=='/') {
if (flags & LOOKUP_RCU) {
rcu_read_lock();
- set_root_rcu(nd);
+ nd->seq = set_root_rcu(nd);
} else {
set_root(nd);
path_get(&nd->root);
@@ -1903,7 +1909,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
}
nd->inode = nd->path.dentry->d_inode;
- return 0;
+ if (!(flags & LOOKUP_RCU))
+ return 0;
+ if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
+ return 0;
+ if (!(nd->flags & LOOKUP_ROOT))
+ nd->root.mnt = NULL;
+ rcu_read_unlock();
+ return -ECHILD;
}
static inline int lookup_last(struct nameidata *nd, struct path *path)
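With the hunks above, hash_name() returns only the component length and stores the packed (hash, length) pair in the qstr via hashlen_create(), so link_path_walk() gets both values from a single 64-bit field. A runnable user-space sketch of the byte-at-a-time fallback; the pack/unpack macros mirror the dcache.h definitions added later in this series, while init/partial/end_name_hash are replaced by a stand-in whose exact arithmetic is incidental to the point:

#include <stdint.h>
#include <stdio.h>

/* Pack hash and length the way hashlen_create() does (length in the top 32 bits). */
#define hashlen_create(hash, len) (((uint64_t)(len) << 32) | (uint32_t)(hash))
#define hashlen_hash(hashlen)     ((uint32_t)(hashlen))
#define hashlen_len(hashlen)      ((uint32_t)((hashlen) >> 32))

struct qstr {
	uint64_t hash_len;
	const char *name;
};

/* Stand-in for partial_name_hash(); the mixing function itself is incidental. */
static unsigned long partial_name_hash(unsigned long c, unsigned long prev)
{
	return (prev + (c << 4) + (c >> 4)) * 11;
}

/* Byte-at-a-time fallback, shaped like the new hash_name() above. */
static long hash_name(const char *name, struct qstr *res)
{
	unsigned long hash = 0, len = 0, c;

	res->name = name;
	c = (unsigned char)*name;
	do {
		len++;
		hash = partial_name_hash(c, hash);
		c = (unsigned char)name[len];
	} while (c && c != '/');
	res->hash_len = hashlen_create((uint32_t)hash, len);
	return len;
}

int main(void)
{
	struct qstr this;
	long len = hash_name("usr/local", &this);	/* stops at the '/' */

	/* One load of hash_len gives the caller both pieces. */
	printf("len=%ld hashlen_len=%u hashlen_hash=%#x\n",
	       len, hashlen_len(this.hash_len), hashlen_hash(this.hash_len));
	return 0;
}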
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 1c5ff6d58385..6a4f3666e273 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1412,24 +1412,18 @@ int nfs_fs_proc_net_init(struct net *net)
p = proc_create("volumes", S_IFREG|S_IRUGO,
nn->proc_nfsfs, &nfs_volume_list_fops);
if (!p)
- goto error_2;
+ goto error_1;
return 0;
-error_2:
- remove_proc_entry("servers", nn->proc_nfsfs);
error_1:
- remove_proc_entry("fs/nfsfs", NULL);
+ remove_proc_subtree("nfsfs", net->proc_net);
error_0:
return -ENOMEM;
}
void nfs_fs_proc_net_exit(struct net *net)
{
- struct nfs_net *nn = net_generic(net, nfs_net_id);
-
- remove_proc_entry("volumes", nn->proc_nfsfs);
- remove_proc_entry("servers", nn->proc_nfsfs);
- remove_proc_entry("fs/nfsfs", NULL);
+ remove_proc_subtree("nfsfs", net->proc_net);
}
/*
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 1359c4a27393..90978075f730 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -1269,11 +1269,12 @@ filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
static void filelayout_retry_commit(struct nfs_commit_info *cinfo, int idx)
{
struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
- struct pnfs_commit_bucket *bucket = fl_cinfo->buckets;
+ struct pnfs_commit_bucket *bucket;
struct pnfs_layout_segment *freeme;
int i;
- for (i = idx; i < fl_cinfo->nbuckets; i++, bucket++) {
+ for (i = idx; i < fl_cinfo->nbuckets; i++) {
+ bucket = &fl_cinfo->buckets[i];
if (list_empty(&bucket->committing))
continue;
nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
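The filelayout_retry_commit() change above fixes a mismatch: the loop index started at idx, but the bucket pointer was initialized to the start of the array, so for any non-zero idx the code walked the wrong buckets. A tiny self-contained illustration of the difference:

#include <stdio.h>

#define NBUCKETS 5

int main(void)
{
	int buckets[NBUCKETS] = { 10, 11, 12, 13, 14 };
	int idx = 2, i;
	int *bucket;

	/* Buggy shape: pointer starts at &buckets[0] even though i starts at idx. */
	bucket = buckets;
	printf("buggy:");
	for (i = idx; i < NBUCKETS; i++, bucket++)
		printf(" %d", *bucket);		/* prints 10 11 12 - off by idx */
	printf("\n");

	/* Fixed shape: derive the element from the index on every iteration. */
	printf("fixed:");
	for (i = idx; i < NBUCKETS; i++) {
		bucket = &buckets[i];
		printf(" %d", *bucket);		/* prints 12 13 14 */
	}
	printf("\n");
	return 0;
}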
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 92193eddb41d..a8b855ab4e22 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -130,16 +130,15 @@ enum {
*/
struct nfs4_lock_state {
- struct list_head ls_locks; /* Other lock stateids */
- struct nfs4_state * ls_state; /* Pointer to open state */
+ struct list_head ls_locks; /* Other lock stateids */
+ struct nfs4_state * ls_state; /* Pointer to open state */
#define NFS_LOCK_INITIALIZED 0
#define NFS_LOCK_LOST 1
- unsigned long ls_flags;
+ unsigned long ls_flags;
struct nfs_seqid_counter ls_seqid;
- nfs4_stateid ls_stateid;
- atomic_t ls_count;
- fl_owner_t ls_owner;
- struct work_struct ls_release;
+ nfs4_stateid ls_stateid;
+ atomic_t ls_count;
+ fl_owner_t ls_owner;
};
/* bits for nfs4_state->flags */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index a043f618cd5a..22fe35104c0c 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -799,18 +799,6 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
return NULL;
}
-static void
-free_lock_state_work(struct work_struct *work)
-{
- struct nfs4_lock_state *lsp = container_of(work,
- struct nfs4_lock_state, ls_release);
- struct nfs4_state *state = lsp->ls_state;
- struct nfs_server *server = state->owner->so_server;
- struct nfs_client *clp = server->nfs_client;
-
- clp->cl_mvops->free_lock_state(server, lsp);
-}
-
/*
* Return a compatible lock_state. If no initialized lock_state structure
* exists, return an uninitialized one.
@@ -832,7 +820,6 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
if (lsp->ls_seqid.owner_id < 0)
goto out_free;
INIT_LIST_HEAD(&lsp->ls_locks);
- INIT_WORK(&lsp->ls_release, free_lock_state_work);
return lsp;
out_free:
kfree(lsp);
@@ -896,12 +883,13 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
if (list_empty(&state->lock_states))
clear_bit(LK_STATE_IN_USE, &state->flags);
spin_unlock(&state->state_lock);
- if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags))
- queue_work(nfsiod_workqueue, &lsp->ls_release);
- else {
- server = state->owner->so_server;
+ server = state->owner->so_server;
+ if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
+ struct nfs_client *clp = server->nfs_client;
+
+ clp->cl_mvops->free_lock_state(server, lsp);
+ } else
nfs4_free_lock_state(server, lsp);
- }
}
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index f9821ce6658a..e94457c33ad6 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2657,6 +2657,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
struct xdr_stream *xdr = cd->xdr;
int start_offset = xdr->buf->len;
int cookie_offset;
+ u32 name_and_cookie;
int entry_bytes;
__be32 nfserr = nfserr_toosmall;
__be64 wire_offset;
@@ -2718,7 +2719,14 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
cd->rd_maxcount -= entry_bytes;
if (!cd->rd_dircount)
goto fail;
- cd->rd_dircount--;
+ /*
+ * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
+ * let's always let through the first entry, at least:
+ */
+ name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8;
+ if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
+ goto fail;
+ cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
cd->cookie_offset = cookie_offset;
skip_entry:
cd->common.err = nfs_ok;
@@ -3321,6 +3329,10 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
}
maxcount = min_t(int, maxcount-16, bytes_left);
+ /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
+ if (!readdir->rd_dircount)
+ readdir->rd_dircount = INT_MAX;
+
readdir->xdr = xdr;
readdir->rd_maxcount = maxcount;
readdir->common.err = 0;
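The dircount bookkeeping above charges each entry 4 * XDR_QUADLEN(namlen) + 8 bytes: the name rounded up to whole 4-byte XDR words, plus an 8-byte cookie. A short sketch of the arithmetic; the XDR_QUADLEN definition below is an assumption that it simply rounds a byte count up to 4-byte quads, as in the sunrpc headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed equivalent of the sunrpc XDR_QUADLEN() macro: bytes -> 4-byte quads. */
#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

int main(void)
{
	uint32_t rd_dircount = 64;
	const char *names[] = { "a", "file.txt", "a-rather-long-entry-name" };

	for (int i = 0; i < 3; i++) {
		uint32_t namlen = 0;
		while (names[i][namlen])
			namlen++;

		/* name padded to quads, plus an 8-byte readdir cookie */
		uint32_t name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8;

		/* same "never underflow" subtraction as the hunk's min() */
		rd_dircount -= (name_and_cookie < rd_dircount) ?
			       name_and_cookie : rd_dircount;
		printf("namlen=%2u charge=%2u dircount left=%2u\n",
		       namlen, name_and_cookie, rd_dircount);
	}
	return 0;
}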
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 238a5930cb3c..9d7e2b9659cb 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
{
struct {
struct file_handle handle;
- u8 pad[64];
+ u8 pad[MAX_HANDLE_SZ];
} f;
int size, ret, i;
@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
size = f.handle.handle_bytes >> 2;
ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
- if ((ret == 255) || (ret == -ENOSPC)) {
+ if ((ret == FILEID_INVALID) || (ret < 0)) {
WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
return 0;
}
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 6eaf5edf1ea1..e77db621ec89 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -45,7 +45,7 @@ void udf_free_inode(struct inode *inode)
udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
}
-struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
+struct inode *udf_new_inode(struct inode *dir, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
@@ -55,14 +55,12 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
struct udf_inode_info *iinfo;
struct udf_inode_info *dinfo = UDF_I(dir);
struct logicalVolIntegrityDescImpUse *lvidiu;
+ int err;
inode = new_inode(sb);
- if (!inode) {
- *err = -ENOMEM;
- return NULL;
- }
- *err = -ENOSPC;
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
iinfo = UDF_I(inode);
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
@@ -80,21 +78,22 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
}
if (!iinfo->i_ext.i_data) {
iput(inode);
- *err = -ENOMEM;
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ err = -ENOSPC;
block = udf_new_block(dir->i_sb, NULL,
dinfo->i_location.partitionReferenceNum,
- start, err);
- if (*err) {
+ start, &err);
+ if (err) {
iput(inode);
- return NULL;
+ return ERR_PTR(err);
}
lvidiu = udf_sb_lvidiu(sb);
if (lvidiu) {
iinfo->i_unique = lvid_get_unique_id(sb);
+ inode->i_generation = iinfo->i_unique;
mutex_lock(&sbi->s_alloc_mutex);
if (S_ISDIR(mode))
le32_add_cpu(&lvidiu->numDirs, 1);
@@ -123,9 +122,12 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
inode->i_mtime = inode->i_atime = inode->i_ctime =
iinfo->i_crtime = current_fs_time(inode->i_sb);
- insert_inode_hash(inode);
+ if (unlikely(insert_inode_locked(inode) < 0)) {
+ make_bad_inode(inode);
+ iput(inode);
+ return ERR_PTR(-EIO);
+ }
mark_inode_dirty(inode);
- *err = 0;
return inode;
}
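udf_new_inode() above switches from the "return NULL and report the reason through an int *err out-parameter" convention to encoding the errno directly in the returned pointer. A compact user-space sketch of the two calling conventions, again with simplified ERR_PTR/IS_ERR/PTR_ERR stand-ins rather than the linux/err.h originals:

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))
#define ENOSPC 28

/* Old shape: NULL on failure, reason reported through *err. */
static void *old_new_inode(int fail, int *err)
{
	if (fail) {
		*err = -ENOSPC;
		return NULL;
	}
	*err = 0;
	return malloc(32);
}

/* New shape: the pointer itself carries the errno on failure. */
static void *new_new_inode(int fail)
{
	return fail ? ERR_PTR(-ENOSPC) : malloc(32);
}

int main(void)
{
	int err;
	void *inode;

	inode = old_new_inode(1, &err);
	if (!inode)
		printf("old style: err=%d\n", err);

	inode = new_new_inode(1);
	if (IS_ERR(inode))
		printf("new style: err=%ld\n", PTR_ERR(inode));
	else
		free(inode);
	return 0;
}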
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 236cd48184c2..08598843288f 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -51,7 +51,6 @@ MODULE_LICENSE("GPL");
static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
-static void udf_fill_inode(struct inode *, struct buffer_head *);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
@@ -1271,12 +1270,33 @@ update_time:
return 0;
}
-static void __udf_read_inode(struct inode *inode)
+/*
+ * Maximum length of the linked list formed by the ICB hierarchy. The chosen
+ * number is arbitrary - high enough that we hopefully don't limit any real use
+ * of rewritten inodes on write-once media, while avoiding looping for too long
+ * on corrupted media.
+ */
+#define UDF_MAX_ICB_NESTING 1024
+
+static int udf_read_inode(struct inode *inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
+ struct extendedFileEntry *efe;
uint16_t ident;
struct udf_inode_info *iinfo = UDF_I(inode);
+ struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
+ struct kernel_lb_addr *iloc = &iinfo->i_location;
+ unsigned int link_count;
+ unsigned int indirections = 0;
+ int ret = -EIO;
+
+reread:
+ if (iloc->logicalBlockNum >=
+ sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
+ udf_debug("block=%d, partition=%d out of range\n",
+ iloc->logicalBlockNum, iloc->partitionReferenceNum);
+ return -EIO;
+ }
/*
* Set defaults, but the inode is still incomplete!
@@ -1290,78 +1310,54 @@ static void __udf_read_inode(struct inode *inode)
* i_nlink = 1
* i_op = NULL;
*/
- bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
+ bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
if (!bh) {
udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
- make_bad_inode(inode);
- return;
+ return -EIO;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
ident != TAG_IDENT_USE) {
udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
inode->i_ino, ident);
- brelse(bh);
- make_bad_inode(inode);
- return;
+ goto out;
}
fe = (struct fileEntry *)bh->b_data;
+ efe = (struct extendedFileEntry *)bh->b_data;
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
struct buffer_head *ibh;
- ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
- &ident);
+ ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
if (ident == TAG_IDENT_IE && ibh) {
- struct buffer_head *nbh = NULL;
struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
- if (ie->indirectICB.extLength &&
- (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
- &ident))) {
- if (ident == TAG_IDENT_FE ||
- ident == TAG_IDENT_EFE) {
- memcpy(&iinfo->i_location,
- &loc,
- sizeof(struct kernel_lb_addr));
- brelse(bh);
- brelse(ibh);
- brelse(nbh);
- __udf_read_inode(inode);
- return;
+ if (ie->indirectICB.extLength) {
+ brelse(ibh);
+ memcpy(&iinfo->i_location, &loc,
+ sizeof(struct kernel_lb_addr));
+ if (++indirections > UDF_MAX_ICB_NESTING) {
+ udf_err(inode->i_sb,
+ "too many ICBs in ICB hierarchy"
+ " (max %d supported)\n",
+ UDF_MAX_ICB_NESTING);
+ goto out;
}
- brelse(nbh);
+ brelse(bh);
+ goto reread;
}
}
brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
udf_err(inode->i_sb, "unsupported strategy type: %d\n",
le16_to_cpu(fe->icbTag.strategyType));
- brelse(bh);
- make_bad_inode(inode);
- return;
+ goto out;
}
- udf_fill_inode(inode, bh);
-
- brelse(bh);
-}
-
-static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
-{
- struct fileEntry *fe;
- struct extendedFileEntry *efe;
- struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
- struct udf_inode_info *iinfo = UDF_I(inode);
- unsigned int link_count;
-
- fe = (struct fileEntry *)bh->b_data;
- efe = (struct extendedFileEntry *)bh->b_data;
-
if (fe->icbTag.strategyType == cpu_to_le16(4))
iinfo->i_strat4096 = 0;
else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
@@ -1378,11 +1374,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
- sizeof(struct extendedFileEntry))) {
- make_bad_inode(inode);
- return;
- }
+ ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+ sizeof(struct extendedFileEntry));
+ if (ret)
+ goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct extendedFileEntry),
inode->i_sb->s_blocksize -
@@ -1390,11 +1385,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
- sizeof(struct fileEntry))) {
- make_bad_inode(inode);
- return;
- }
+ ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+ sizeof(struct fileEntry));
+ if (ret)
+ goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct fileEntry),
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
@@ -1404,18 +1398,18 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
- sizeof(struct unallocSpaceEntry))) {
- make_bad_inode(inode);
- return;
- }
+ ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+ sizeof(struct unallocSpaceEntry));
+ if (ret)
+ goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
- return;
+ return 0;
}
+ ret = -EIO;
read_lock(&sbi->s_cred_lock);
i_uid_write(inode, le32_to_cpu(fe->uid));
if (!uid_valid(inode->i_uid) ||
@@ -1441,8 +1435,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
read_unlock(&sbi->s_cred_lock);
link_count = le16_to_cpu(fe->fileLinkCount);
- if (!link_count)
- link_count = 1;
+ if (!link_count) {
+ ret = -ESTALE;
+ goto out;
+ }
set_nlink(inode, link_count);
inode->i_size = le64_to_cpu(fe->informationLength);
@@ -1488,6 +1484,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
}
+ inode->i_generation = iinfo->i_unique;
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
@@ -1537,8 +1534,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
default:
udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
inode->i_ino, fe->icbTag.fileType);
- make_bad_inode(inode);
- return;
+ goto out;
}
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct deviceSpec *dsea =
@@ -1549,8 +1545,12 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
le32_to_cpu(dsea->minorDeviceIdent)));
/* Developer ID ??? */
} else
- make_bad_inode(inode);
+ goto out;
}
+ ret = 0;
+out:
+ brelse(bh);
+ return ret;
}
static int udf_alloc_i_data(struct inode *inode, size_t size)
@@ -1664,7 +1664,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
fe->permissions = cpu_to_le32(udfperms);
- if (S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
else
fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
@@ -1830,32 +1830,23 @@ struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
{
unsigned long block = udf_get_lb_pblock(sb, ino, 0);
struct inode *inode = iget_locked(sb, block);
+ int err;
if (!inode)
- return NULL;
-
- if (inode->i_state & I_NEW) {
- memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
- __udf_read_inode(inode);
- unlock_new_inode(inode);
- }
+ return ERR_PTR(-ENOMEM);
- if (is_bad_inode(inode))
- goto out_iput;
+ if (!(inode->i_state & I_NEW))
+ return inode;
- if (ino->logicalBlockNum >= UDF_SB(sb)->
- s_partmaps[ino->partitionReferenceNum].s_partition_len) {
- udf_debug("block=%d, partition=%d out of range\n",
- ino->logicalBlockNum, ino->partitionReferenceNum);
- make_bad_inode(inode);
- goto out_iput;
+ memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
+ err = udf_read_inode(inode);
+ if (err < 0) {
+ iget_failed(inode);
+ return ERR_PTR(err);
}
+ unlock_new_inode(inode);
return inode;
-
- out_iput:
- iput(inode);
- return NULL;
}
int udf_add_aext(struct inode *inode, struct extent_position *epos,
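The strategy-4096 handling in __udf_read_inode() used to recurse for each indirect ICB; above it becomes a reread: loop whose depth is capped by UDF_MAX_ICB_NESTING, so a corrupted (for example, cyclic) ICB chain can no longer recurse or spin forever. A self-contained sketch of the bounded-indirection pattern, with generic names:

#include <stdio.h>

#define MAX_NESTING 1024

struct icb {
	const struct icb *indirect;	/* non-NULL: "read the inode from there" */
	int payload;
};

static int read_inode(const struct icb *loc, int *payload)
{
	unsigned int indirections = 0;

reread:
	if (loc->indirect) {
		if (++indirections > MAX_NESTING) {
			fprintf(stderr, "too many indirect ICBs (max %d)\n",
				MAX_NESTING);
			return -1;	/* corrupted media: give up, like -EIO */
		}
		loc = loc->indirect;
		goto reread;
	}
	*payload = loc->payload;
	return 0;
}

int main(void)
{
	struct icb real = { .indirect = NULL, .payload = 42 };
	struct icb hop  = { .indirect = &real };
	struct icb loop = { .indirect = &loop };	/* cyclic chain */
	int value;

	if (!read_inode(&hop, &value))
		printf("read payload %d via one indirection\n", value);
	if (read_inode(&loop, &value))
		printf("cyclic chain rejected\n");
	return 0;
}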
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 83a06001742b..c12e260fd6c4 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -270,9 +270,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
NULL, 0),
};
inode = udf_iget(dir->i_sb, lb);
- if (!inode) {
- return ERR_PTR(-EACCES);
- }
+ if (IS_ERR(inode))
+ return inode;
} else
#endif /* UDF_RECOVERY */
@@ -285,9 +284,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
loc = lelb_to_cpu(cfi.icb.extLocation);
inode = udf_iget(dir->i_sb, &loc);
- if (!inode) {
- return ERR_PTR(-EACCES);
- }
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
@@ -550,32 +548,18 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
}
-static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
{
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ struct inode *dir = dentry->d_parent->d_inode;
struct udf_fileident_bh fibh;
- struct inode *inode;
struct fileIdentDesc cfi, *fi;
int err;
- struct udf_inode_info *iinfo;
-
- inode = udf_new_inode(dir, mode, &err);
- if (!inode) {
- return err;
- }
-
- iinfo = UDF_I(inode);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
- inode->i_data.a_ops = &udf_adinicb_aops;
- else
- inode->i_data.a_ops = &udf_aops;
- inode->i_op = &udf_file_inode_operations;
- inode->i_fop = &udf_file_operations;
- mark_inode_dirty(inode);
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
- if (!fi) {
+ if (unlikely(!fi)) {
inode_dec_link_count(inode);
+ unlock_new_inode(inode);
iput(inode);
return err;
}
@@ -589,23 +573,21 @@ static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
+ unlock_new_inode(inode);
d_instantiate(dentry, inode);
return 0;
}
-static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool excl)
{
- struct inode *inode;
- struct udf_inode_info *iinfo;
- int err;
+ struct inode *inode = udf_new_inode(dir, mode);
- inode = udf_new_inode(dir, mode, &err);
- if (!inode)
- return err;
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
- iinfo = UDF_I(inode);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+ if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
@@ -613,7 +595,25 @@ static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_fop = &udf_file_operations;
mark_inode_dirty(inode);
+ return udf_add_nondir(dentry, inode);
+}
+
+static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct inode *inode = udf_new_inode(dir, mode);
+
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+ inode->i_data.a_ops = &udf_adinicb_aops;
+ else
+ inode->i_data.a_ops = &udf_aops;
+ inode->i_op = &udf_file_inode_operations;
+ inode->i_fop = &udf_file_operations;
+ mark_inode_dirty(inode);
d_tmpfile(dentry, inode);
+ unlock_new_inode(inode);
return 0;
}
@@ -621,44 +621,16 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t rdev)
{
struct inode *inode;
- struct udf_fileident_bh fibh;
- struct fileIdentDesc cfi, *fi;
- int err;
- struct udf_inode_info *iinfo;
if (!old_valid_dev(rdev))
return -EINVAL;
- err = -EIO;
- inode = udf_new_inode(dir, mode, &err);
- if (!inode)
- goto out;
+ inode = udf_new_inode(dir, mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
- iinfo = UDF_I(inode);
init_special_inode(inode, mode, rdev);
- fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
- if (!fi) {
- inode_dec_link_count(inode);
- iput(inode);
- return err;
- }
- cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
- cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
- *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
- cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
- udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
- if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
- mark_inode_dirty(dir);
- mark_inode_dirty(inode);
-
- if (fibh.sbh != fibh.ebh)
- brelse(fibh.ebh);
- brelse(fibh.sbh);
- d_instantiate(dentry, inode);
- err = 0;
-
-out:
- return err;
+ return udf_add_nondir(dentry, inode);
}
static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -670,10 +642,9 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct udf_inode_info *dinfo = UDF_I(dir);
struct udf_inode_info *iinfo;
- err = -EIO;
- inode = udf_new_inode(dir, S_IFDIR | mode, &err);
- if (!inode)
- goto out;
+ inode = udf_new_inode(dir, S_IFDIR | mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
iinfo = UDF_I(inode);
inode->i_op = &udf_dir_inode_operations;
@@ -681,6 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
if (!fi) {
inode_dec_link_count(inode);
+ unlock_new_inode(inode);
iput(inode);
goto out;
}
@@ -699,6 +671,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (!fi) {
clear_nlink(inode);
mark_inode_dirty(inode);
+ unlock_new_inode(inode);
iput(inode);
goto out;
}
@@ -710,6 +683,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
inc_nlink(dir);
mark_inode_dirty(dir);
+ unlock_new_inode(inode);
d_instantiate(dentry, inode);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
@@ -876,14 +850,11 @@ out:
static int udf_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
- struct inode *inode;
+ struct inode *inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO);
struct pathComponent *pc;
const char *compstart;
- struct udf_fileident_bh fibh;
struct extent_position epos = {};
int eoffset, elen = 0;
- struct fileIdentDesc *fi;
- struct fileIdentDesc cfi;
uint8_t *ea;
int err;
int block;
@@ -892,9 +863,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
struct udf_inode_info *iinfo;
struct super_block *sb = dir->i_sb;
- inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
- if (!inode)
- goto out;
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
iinfo = UDF_I(inode);
down_write(&iinfo->i_data_sem);
@@ -1012,32 +982,15 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
mark_inode_dirty(inode);
up_write(&iinfo->i_data_sem);
- fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
- if (!fi)
- goto out_fail;
- cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
- cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
- if (UDF_SB(inode->i_sb)->s_lvid_bh) {
- *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
- cpu_to_le32(lvid_get_unique_id(sb));
- }
- udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
- if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
- mark_inode_dirty(dir);
- if (fibh.sbh != fibh.ebh)
- brelse(fibh.ebh);
- brelse(fibh.sbh);
- d_instantiate(dentry, inode);
- err = 0;
-
+ err = udf_add_nondir(dentry, inode);
out:
kfree(name);
return err;
out_no_entry:
up_write(&iinfo->i_data_sem);
-out_fail:
inode_dec_link_count(inode);
+ unlock_new_inode(inode);
iput(inode);
goto out;
}
@@ -1222,7 +1175,7 @@ static struct dentry *udf_get_parent(struct dentry *child)
struct udf_fileident_bh fibh;
if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
- goto out_unlock;
+ return ERR_PTR(-EACCES);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
@@ -1230,12 +1183,10 @@ static struct dentry *udf_get_parent(struct dentry *child)
tloc = lelb_to_cpu(cfi.icb.extLocation);
inode = udf_iget(child->d_inode->i_sb, &tloc);
- if (!inode)
- goto out_unlock;
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
return d_obtain_alias(inode);
-out_unlock:
- return ERR_PTR(-EACCES);
}
@@ -1252,8 +1203,8 @@ static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block,
loc.partitionReferenceNum = partref;
inode = udf_iget(sb, &loc);
- if (inode == NULL)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
iput(inode);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 813da94d447b..5401fc33f5cc 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -961,12 +961,14 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
metadata_fe = udf_iget(sb, &addr);
- if (metadata_fe == NULL)
+ if (IS_ERR(metadata_fe)) {
udf_warn(sb, "metadata inode efe not found\n");
- else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
+ return metadata_fe;
+ }
+ if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
iput(metadata_fe);
- metadata_fe = NULL;
+ return ERR_PTR(-EIO);
}
return metadata_fe;
@@ -978,6 +980,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
struct udf_part_map *map;
struct udf_meta_data *mdata;
struct kernel_lb_addr addr;
+ struct inode *fe;
map = &sbi->s_partmaps[partition];
mdata = &map->s_type_specific.s_metadata;
@@ -986,22 +989,24 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
udf_debug("Metadata file location: block = %d part = %d\n",
mdata->s_meta_file_loc, map->s_partition_num);
- mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb,
- mdata->s_meta_file_loc, map->s_partition_num);
-
- if (mdata->s_metadata_fe == NULL) {
+ fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
+ map->s_partition_num);
+ if (IS_ERR(fe)) {
/* mirror file entry */
udf_debug("Mirror metadata file location: block = %d part = %d\n",
mdata->s_mirror_file_loc, map->s_partition_num);
- mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
- mdata->s_mirror_file_loc, map->s_partition_num);
+ fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
+ map->s_partition_num);
- if (mdata->s_mirror_fe == NULL) {
+ if (IS_ERR(fe)) {
udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
- return -EIO;
+ return PTR_ERR(fe);
}
- }
+ mdata->s_mirror_fe = fe;
+ } else
+ mdata->s_metadata_fe = fe;
+
/*
* bitmap file entry
@@ -1015,15 +1020,16 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
udf_debug("Bitmap file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
- mdata->s_bitmap_fe = udf_iget(sb, &addr);
- if (mdata->s_bitmap_fe == NULL) {
+ fe = udf_iget(sb, &addr);
+ if (IS_ERR(fe)) {
if (sb->s_flags & MS_RDONLY)
udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
else {
udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
- return -EIO;
+ return PTR_ERR(fe);
}
- }
+ } else
+ mdata->s_bitmap_fe = fe;
}
udf_debug("udf_load_metadata_files Ok\n");
@@ -1111,13 +1117,15 @@ static int udf_fill_partdesc_info(struct super_block *sb,
phd->unallocSpaceTable.extPosition),
.partitionReferenceNum = p_index,
};
+ struct inode *inode;
- map->s_uspace.s_table = udf_iget(sb, &loc);
- if (!map->s_uspace.s_table) {
+ inode = udf_iget(sb, &loc);
+ if (IS_ERR(inode)) {
udf_debug("cannot load unallocSpaceTable (part %d)\n",
p_index);
- return -EIO;
+ return PTR_ERR(inode);
}
+ map->s_uspace.s_table = inode;
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
udf_debug("unallocSpaceTable (part %d) @ %ld\n",
p_index, map->s_uspace.s_table->i_ino);
@@ -1144,14 +1152,15 @@ static int udf_fill_partdesc_info(struct super_block *sb,
phd->freedSpaceTable.extPosition),
.partitionReferenceNum = p_index,
};
+ struct inode *inode;
- map->s_fspace.s_table = udf_iget(sb, &loc);
- if (!map->s_fspace.s_table) {
+ inode = udf_iget(sb, &loc);
+ if (IS_ERR(inode)) {
udf_debug("cannot load freedSpaceTable (part %d)\n",
p_index);
- return -EIO;
+ return PTR_ERR(inode);
}
-
+ map->s_fspace.s_table = inode;
map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
udf_debug("freedSpaceTable (part %d) @ %ld\n",
p_index, map->s_fspace.s_table->i_ino);
@@ -1178,6 +1187,7 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
struct udf_part_map *map = &sbi->s_partmaps[p_index];
sector_t vat_block;
struct kernel_lb_addr ino;
+ struct inode *inode;
/*
* VAT file entry is in the last recorded block. Some broken disks have
@@ -1186,10 +1196,13 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
ino.partitionReferenceNum = type1_index;
for (vat_block = start_block;
vat_block >= map->s_partition_root &&
- vat_block >= start_block - 3 &&
- !sbi->s_vat_inode; vat_block--) {
+ vat_block >= start_block - 3; vat_block--) {
ino.logicalBlockNum = vat_block - map->s_partition_root;
- sbi->s_vat_inode = udf_iget(sb, &ino);
+ inode = udf_iget(sb, &ino);
+ if (!IS_ERR(inode)) {
+ sbi->s_vat_inode = inode;
+ break;
+ }
}
}
@@ -2205,10 +2218,10 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
/* assign inodes by physical block number */
/* perhaps it's not extensible enough, but for now ... */
inode = udf_iget(sb, &rootdir);
- if (!inode) {
+ if (IS_ERR(inode)) {
udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
- ret = -EIO;
+ ret = PTR_ERR(inode);
goto error_out;
}
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index be7dabbbcb49..742557be9936 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -143,7 +143,6 @@ extern int udf_expand_file_adinicb(struct inode *);
extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
extern int udf_setsize(struct inode *, loff_t);
-extern void udf_read_inode(struct inode *);
extern void udf_evict_inode(struct inode *);
extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
extern long udf_block_map(struct inode *, sector_t);
@@ -209,7 +208,7 @@ extern int udf_CS0toUTF8(struct ustr *, const struct ustr *);
/* ialloc.c */
extern void udf_free_inode(struct inode *);
-extern struct inode *udf_new_inode(struct inode *, umode_t, int *);
+extern struct inode *udf_new_inode(struct inode *, umode_t);
/* truncate.c */
extern void udf_truncate_tail_extent(struct inode *);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index e4ae2ad48d07..75a227cc7ce2 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -55,6 +55,7 @@ struct qstr {
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
#define hashlen_hash(hashlen) ((u32) (hashlen))
#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
+#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
struct dentry_stat_t {
long nr_dentry;
diff --git a/include/linux/hash.h b/include/linux/hash.h
index bd1754c7ecef..d0494c399392 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
u64 hash = val;
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+ hash = hash * GOLDEN_RATIO_PRIME_64;
+#else
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
u64 n = hash;
n <<= 18;
@@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
hash += n;
n <<= 2;
hash += n;
+#endif
/* High bits are more random, so use them. */
return hash >> (64 - bits);
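On 64-bit architectures with a fast multiplier, hash_64() above now performs a single multiply by GOLDEN_RATIO_PRIME_64 instead of the shift-and-add ladder, and still returns the top bits of the product. A user-space sketch of that fast path; the constant below is a stand-in for GOLDEN_RATIO_PRIME_64 (the authoritative value is in linux/hash.h):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for GOLDEN_RATIO_PRIME_64: a large odd constant derived from the
 * golden ratio. See linux/hash.h for the kernel's exact value. */
#define GOLDEN_RATIO_64 0x9e37fffffffc0001ULL

static uint64_t hash_64(uint64_t val, unsigned int bits)
{
	/* One multiply; the high bits of the product are the best mixed. */
	return (val * GOLDEN_RATIO_64) >> (64 - bits);
}

int main(void)
{
	/* Spread a few consecutive inputs across an 8-bit (256-bucket) table. */
	for (uint64_t v = 0; v < 4; v++)
		printf("hash_64(%llu, 8) = %llu\n",
		       (unsigned long long)v,
		       (unsigned long long)hash_64(v, 8));
	return 0;
}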
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 1f44466c1e9d..c367cbdf73ab 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -258,23 +258,11 @@ extern unsigned long preset_lpj;
#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
#endif
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION \
- ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion. See there
- * for more details. It is the scaled resolution rounding value. Note
- * that it is a 64-bit value. Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
/*
* The maximum jiffie value is (MAX_INT >> 1). Here we translate that
* into seconds. The 64-bit case will overflow if we are not careful,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 071f6b234604..511c6e0d21a9 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1196,6 +1196,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+ int port, int qpn, u16 prio, u64 *reg_id);
+
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
int i, int val);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 38377392d082..c8e388e5fccc 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3176,7 +3176,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
}
/**
- * __dev_uc_unsync - Remove synchonized addresses from device
+ * __dev_uc_unsync - Remove synchronized addresses from device
* @dev: device to sync
* @unsync: function to call if address should be removed
*
@@ -3220,7 +3220,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
}
/**
- * __dev_mc_unsync - Remove synchonized addresses from device
+ * __dev_mc_unsync - Remove synchronized addresses from device
* @dev: device to sync
* @unsync: function to call if address should be removed
*
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2077489f9887..2517ece98820 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -9,6 +9,7 @@
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
+#include <linux/static_key.h>
#include <uapi/linux/netfilter.h>
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
@@ -99,9 +100,9 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-#if defined(CONFIG_JUMP_LABEL)
-#include <linux/static_key.h>
+#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+
static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index b5d5af3aa469..6f884e6c731e 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -464,6 +464,8 @@ struct hci_conn_params {
HCI_AUTO_CONN_ALWAYS,
HCI_AUTO_CONN_LINK_LOSS,
} auto_connect;
+
+ struct hci_conn *conn;
};
extern struct list_head hci_dev_list;
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
index e2070960bac0..8170f8d7052b 100644
--- a/include/net/netns/ieee802154_6lowpan.h
+++ b/include/net/netns/ieee802154_6lowpan.h
@@ -16,7 +16,6 @@ struct netns_sysctl_lowpan {
struct netns_ieee802154_lowpan {
struct netns_sysctl_lowpan sysctl;
struct netns_frags frags;
- int max_dsize;
};
#endif
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 259992444e80..dad7ab20a8cb 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -167,7 +167,7 @@ struct ieee80211_reg_rule {
struct ieee80211_regdomain {
struct rcu_head rcu_head;
u32 n_reg_rules;
- char alpha2[2];
+ char alpha2[3];
enum nl80211_dfs_regions dfs_region;
struct ieee80211_reg_rule reg_rules[];
};
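The alpha2 field above grows from 2 to 3 bytes, presumably so the two-letter country code has room for a NUL terminator and can be handled as a C string without reading past the field. A small sketch of the difference; the struct names here are illustrative only:

#include <stdio.h>
#include <string.h>

struct regdomain_2 { char alpha2[2]; };	/* old layout: no room for '\0' */
struct regdomain_3 { char alpha2[3]; };	/* new layout: NUL-terminated   */

int main(void)
{
	struct regdomain_3 rd = { .alpha2 = "US" };	/* 'U', 'S', '\0' */

	/* Safe: alpha2 is a proper C string. */
	printf("alpha2 = %s (strlen %zu)\n", rd.alpha2, strlen(rd.alpha2));

	/* With char alpha2[2] the same initializer stores only 'U','S';
	 * passing that field to %s or strlen() would read past it. */
	return 0;
}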
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index f6e7397e799d..9fbd856e6713 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -320,6 +320,19 @@ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
return asoc ? asoc->assoc_id : 0;
}
+static inline enum sctp_sstat_state
+sctp_assoc_to_state(const struct sctp_association *asoc)
+{
+ /* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we
+ * got rid of it in kernel space. Therefore SCTP_CLOSED et al
+ * start at =1 in user space, but at =0 in kernel space.
+ * Since we cannot break user space and SCTP_EMPTY is exposed
+ * there, we need to fix it up with an ugly offset so as not to break
+ * applications. :(
+ */
+ return asoc->state + 1;
+}
+
/* Look up the association by its id. */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
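sctp_assoc_to_state() above papers over a numbering mismatch: the uapi enum keeps the legacy SCTP_EMPTY = 0 in front, so every user-visible state is one greater than its in-kernel counterpart. A small sketch of the offset; the enum values below illustrate the comment above and are not copied from the uapi header:

#include <assert.h>
#include <stdio.h>

/* In-kernel numbering starts at 0 (no dummy state). */
enum kernel_state { K_CLOSED = 0, K_COOKIE_WAIT, K_COOKIE_ECHOED, K_ESTABLISHED };

/* uapi numbering keeps the historical dummy EMPTY = 0 in front. */
enum uapi_state { U_EMPTY = 0, U_CLOSED, U_COOKIE_WAIT, U_COOKIE_ECHOED, U_ESTABLISHED };

/* Same fix-up as sctp_assoc_to_state(): shift by one on the way out. */
static enum uapi_state to_uapi(enum kernel_state s)
{
	return (enum uapi_state)(s + 1);
}

int main(void)
{
	assert(to_uapi(K_CLOSED) == U_CLOSED);
	assert(to_uapi(K_ESTABLISHED) == U_ESTABLISHED);
	printf("kernel K_ESTABLISHED=%d -> uapi U_ESTABLISHED=%d\n",
	       K_ESTABLISHED, to_uapi(K_ESTABLISHED));
	return 0;
}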
diff --git a/include/net/sock.h b/include/net/sock.h
index 7f2ab72f321a..b9a5bd0ed9f3 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2165,9 +2165,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
- (kt.tv64 &&
- (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
- skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) ||
+ (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
(hwtstamps->hwtstamp.tv64 &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
diff --git a/include/net/wimax.h b/include/net/wimax.h
index e52ef5357e08..c52b68577cb0 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -290,7 +290,7 @@ struct wimax_dev;
* This operation has to be synchronous, and return only when the
* reset is complete. In case of having had to resort to bus/cold
* reset implying a device disconnection, the call is allowed to
- * return inmediately.
+ * return immediately.
* NOTE: wimax_dev->mutex is NOT locked when this op is being
* called; however, wimax_dev->mutex_reset IS locked to ensure
* serialization of calls to wimax_reset().
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 1c09820df585..3608bebd3d9c 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(softirq,
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_exit tracepoint
- * we can determine the softirq handler runtine.
+ * we can determine the softirq handler routine.
*/
DEFINE_EVENT(softirq, softirq_entry,
@@ -121,7 +121,7 @@ DEFINE_EVENT(softirq, softirq_entry,
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
- * we can determine the softirq handler runtine.
+ * we can determine the softirq handler routine.
*/
DEFINE_EVENT(softirq, softirq_exit,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 24e9033f8b3f..be88166349a1 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -240,6 +240,7 @@ header-y += matroxfb.h
header-y += mdio.h
header-y += media.h
header-y += mei.h
+header-y += memfd.h
header-y += mempolicy.h
header-y += meye.h
header-y += mic_common.h
@@ -395,6 +396,7 @@ header-y += un.h
header-y += unistd.h
header-y += unix_diag.h
header-y += usbdevice_fs.h
+header-y += usbip.h
header-y += utime.h
header-y += utsname.h
header-y += uuid.h
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 19df18c9b8be..1874ebe9ac1e 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -165,6 +165,7 @@ struct input_keymap_entry {
#define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */
#define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */
#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */
+#define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */
#define INPUT_PROP_MAX 0x1f
#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1)
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 131a6ccdba25..14334d0161d5 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -53,6 +53,9 @@
/* operation as Dom0 is supported */
#define XENFEAT_dom0 11
+/* Xen also maps grant references at pfn = mfn */
+#define XENFEAT_grant_map_identity 12
+
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7dc8788cfd52..940aced4ed00 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1035,6 +1035,11 @@ static void cgroup_get(struct cgroup *cgrp)
css_get(&cgrp->self);
}
+static bool cgroup_tryget(struct cgroup *cgrp)
+{
+ return css_tryget(&cgrp->self);
+}
+
static void cgroup_put(struct cgroup *cgrp)
{
css_put(&cgrp->self);
@@ -1147,7 +1152,8 @@ static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
* protection against removal. Ensure @cgrp stays accessible and
* break the active_ref protection.
*/
- cgroup_get(cgrp);
+ if (!cgroup_tryget(cgrp))
+ return NULL;
kernfs_break_active_protection(kn);
mutex_lock(&cgroup_mutex);
@@ -3271,8 +3277,17 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
struct cftype *cft;
- for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
- cft->flags |= __CFTYPE_NOT_ON_DFL;
+ /*
+ * If legacy_files_on_dfl, we want to show the legacy files on the
+ * dfl hierarchy but iff the target subsystem hasn't been updated
+ * for the dfl hierarchy yet.
+ */
+ if (!cgroup_legacy_files_on_dfl ||
+ ss->dfl_cftypes != ss->legacy_cftypes) {
+ for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+ cft->flags |= __CFTYPE_NOT_ON_DFL;
+ }
+
return cgroup_add_cftypes(ss, cfts);
}
@@ -4387,6 +4402,15 @@ static void css_release_work_fn(struct work_struct *work)
/* cgroup release path */
cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
cgrp->id = -1;
+
+ /*
+ * There are two control paths which try to determine
+ * cgroup from dentry without going through kernfs -
+ * cgroupstats_build() and css_tryget_online_from_dir().
+ * Those are supported by RCU protecting clearing of
+ * cgrp->kn->priv backpointer.
+ */
+ RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
}
mutex_unlock(&cgroup_mutex);
@@ -4543,6 +4567,11 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
struct cftype *base_files;
int ssid, ret;
+ /* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
+ */
+ if (strchr(name, '\n'))
+ return -EINVAL;
+
parent = cgroup_kn_lock_live(parent_kn);
if (!parent)
return -ENODEV;
@@ -4820,16 +4849,6 @@ static int cgroup_rmdir(struct kernfs_node *kn)
cgroup_kn_unlock(kn);
- /*
- * There are two control paths which try to determine cgroup from
- * dentry without going through kernfs - cgroupstats_build() and
- * css_tryget_online_from_dir(). Those are supported by RCU
- * protecting clearing of cgrp->kn->priv backpointer, which should
- * happen after all files under it have been removed.
- */
- if (!ret)
- RCU_INIT_POINTER(*(void __rcu __force **)&kn->priv, NULL);
-
cgroup_put(cgrp);
return ret;
}
@@ -5416,7 +5435,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
/*
* This path doesn't originate from kernfs and @kn could already
* have been or be removed at any point. @kn->priv is RCU
- * protected for this access. See cgroup_rmdir() for details.
+ * protected for this access. See css_release_work_fn() for details.
*/
cgrp = rcu_dereference(kn->priv);
if (cgrp)
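cgroup_kn_lock_live() above now uses cgroup_tryget(), so the reference is taken only while the cgroup is still alive rather than unconditionally. In the kernel css_tryget() sits on a percpu refcount; the sketch below shows only the conceptual increment-if-not-zero shape with C11 atomics, and every name in it is illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;	/* 0 means the object is already dying */
};

/* Conceptual tryget: succeed only while the count is still non-zero,
 * so a dying object can never be resurrected. */
static bool obj_tryget(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;
	}
	return false;
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		printf("last reference dropped, object can be freed\n");
}

int main(void)
{
	struct obj o = { .refcount = 1 };

	printf("tryget on live object: %d\n", obj_tryget(&o));	/* 1 */
	obj_put(&o);
	obj_put(&o);						/* count hits 0 */
	printf("tryget on dead object: %d\n", obj_tryget(&o));	/* 0 */
	return 0;
}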
diff --git a/kernel/futex.c b/kernel/futex.c
index d3a9d946d0b7..815d7af2ffe8 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2592,6 +2592,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* shared futexes. We need to compare the keys:
*/
if (match_futex(&q.key, &key2)) {
+ queue_unlock(hb);
ret = -EINVAL;
goto out_put_keys;
}
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index e30ac0fe61c3..0aa69ea1d8fd 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
*/
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
{
- long ret;
+ long t1, t2;
- ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
+ t1 = kptr_obfuscate((long)v1, type);
+ t2 = kptr_obfuscate((long)v2, type);
- return (ret < 0) | ((ret > 0) << 1);
+ return (t1 < t2) | ((t1 > t2) << 1);
}
/* The caller must have pinned the task */
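The kcmp_ptr() change above replaces the subtraction-based ordering with two explicit comparisons, because the difference of two longs can overflow and then carry the wrong sign. A small standalone illustration of the failure mode and of the fix (the 0/1/2 encoding mirrors what kcmp returns):

#include <limits.h>
#include <stdio.h>

/* Encode an ordering the way kcmp does: 0 = equal, 1 = less, 2 = greater. */
static int cmp_by_subtraction(long a, long b)
{
	long d = a - b;			/* may overflow -> wrong sign */

	return (d < 0) | ((d > 0) << 1);
}

static int cmp_by_relation(long a, long b)
{
	return (a < b) | ((a > b) << 1);	/* never overflows */
}

int main(void)
{
	long a = LONG_MAX, b = -1;

	/* a > b, so the correct answer is 2. The subtraction variant
	 * overflows (LONG_MAX - (-1)) and typically reports 1 instead. */
	printf("subtraction: %d, relational: %d\n",
	       cmp_by_subtraction(a, b), cmp_by_relation(a, b));
	return 0;
}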
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index e04c455a0e38..1ce770687ea8 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1665,15 +1665,15 @@ asmlinkage int vprintk_emit(int facility, int level,
raw_spin_lock(&logbuf_lock);
logbuf_cpu = this_cpu;
- if (recursion_bug) {
+ if (unlikely(recursion_bug)) {
static const char recursion_msg[] =
"BUG: recent printk recursion!";
recursion_bug = 0;
- text_len = strlen(recursion_msg);
/* emit KERN_CRIT message */
printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
- NULL, 0, recursion_msg, text_len);
+ NULL, 0, recursion_msg,
+ strlen(recursion_msg));
}
/*
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 4aec4a457431..a7077d3ae52f 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
ktime_t now)
{
+ unsigned long flags;
struct k_itimer *ptr = container_of(alarm, struct k_itimer,
it.alarm.alarmtimer);
- if (posix_timer_event(ptr, 0) != 0)
- ptr->it_overrun++;
+ enum alarmtimer_restart result = ALARMTIMER_NORESTART;
+
+ spin_lock_irqsave(&ptr->it_lock, flags);
+ if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
+ if (posix_timer_event(ptr, 0) != 0)
+ ptr->it_overrun++;
+ }
/* Re-add periodic timers */
if (ptr->it.alarm.interval.tv64) {
ptr->it_overrun += alarm_forward(alarm, now,
ptr->it.alarm.interval);
- return ALARMTIMER_RESTART;
+ result = ALARMTIMER_RESTART;
}
- return ALARMTIMER_NORESTART;
+ spin_unlock_irqrestore(&ptr->it_lock, flags);
+
+ return result;
}
/**
@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
* @new_timer: k_itimer pointer
* @cur_setting: itimerspec data to fill
*
- * Copies the itimerspec data out from the k_itimer
+ * Copies out the current itimerspec data
*/
static void alarm_timer_get(struct k_itimer *timr,
struct itimerspec *cur_setting)
{
- memset(cur_setting, 0, sizeof(struct itimerspec));
+ ktime_t relative_expiry_time =
+ alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
+
+ if (ktime_to_ns(relative_expiry_time) > 0) {
+ cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
+ } else {
+ cur_setting->it_value.tv_sec = 0;
+ cur_setting->it_value.tv_nsec = 0;
+ }
- cur_setting->it_interval =
- ktime_to_timespec(timr->it.alarm.interval);
- cur_setting->it_value =
- ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
- return;
+ cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
}
/**
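alarm_timer_get() above now reports the time remaining until expiry, clamped at zero once the alarm has fired, instead of the raw absolute expiry. A minimal userspace sketch of that computation against CLOCK_REALTIME; the function names here are illustrative, not the kernel's:

#include <stdio.h>
#include <time.h>

/* Remaining time until an absolute expiry, clamped to zero once it has
 * passed, the same shape as the alarm_expires_remaining() based code. */
static struct timespec remaining_until(struct timespec expiry)
{
	struct timespec now, rem = { 0, 0 };

	clock_gettime(CLOCK_REALTIME, &now);
	rem.tv_sec = expiry.tv_sec - now.tv_sec;
	rem.tv_nsec = expiry.tv_nsec - now.tv_nsec;
	if (rem.tv_nsec < 0) {
		rem.tv_sec -= 1;
		rem.tv_nsec += 1000000000L;
	}
	if (rem.tv_sec < 0) {		/* already expired: report zero */
		rem.tv_sec = 0;
		rem.tv_nsec = 0;
	}
	return rem;
}

int main(void)
{
	struct timespec expiry;

	clock_gettime(CLOCK_REALTIME, &expiry);
	expiry.tv_sec += 5;		/* pretend the alarm fires in 5 seconds */

	struct timespec rem = remaining_until(expiry);
	printf("remaining: %ld.%09ld s\n", (long)rem.tv_sec, rem.tv_nsec);
	return 0;
}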
diff --git a/kernel/time/time.c b/kernel/time/time.c
index f0294ba14634..a9ae20fb0b11 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -559,17 +559,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
* that a remainder subtract here would not do the right thing as the
* resolution values don't fall on second boundaries. I.e. the line:
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ * Note that due to the small error in the multiplier here, this
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+ * OK.
*
* Rather, we just shift the bits off the right.
*
* The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
* value to a scaled second value.
*/
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
{
- unsigned long sec = value->tv_sec;
- long nsec = value->tv_nsec + TICK_NSEC - 1;
+ nsec = nsec + TICK_NSEC - 1;
if (sec >= MAX_SEC_IN_JIFFIES){
sec = MAX_SEC_IN_JIFFIES;
@@ -580,6 +583,13 @@ timespec_to_jiffies(const struct timespec *value)
(NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
+
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+ return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+
EXPORT_SYMBOL(timespec_to_jiffies);
void
@@ -596,31 +606,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
}
EXPORT_SYMBOL(jiffies_to_timespec);
-/* Same for "timeval"
- *
- * Well, almost. The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
+/*
+ * We could use a similar algorithm to timespec_to_jiffies (with a
+ * different multiplier for usec instead of nsec). But this has a
+ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
+ * usec value, since it's not necessarily integral.
+ *
+ * We could instead round in the intermediate scaled representation
+ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
+ * perilous: the scaling introduces a small positive error, which
+ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
+ * units to the intermediate before shifting) leads to accidental
+ * overflow and overestimates.
+ *
+ * At the cost of one additional multiplication by a constant, just
+ * use the timespec implementation.
*/
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
- unsigned long sec = value->tv_sec;
- long usec = value->tv_usec;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- usec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
- (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+ return __timespec_to_jiffies(value->tv_sec,
+ value->tv_usec * NSEC_PER_USEC);
}
EXPORT_SYMBOL(timeval_to_jiffies);
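Both conversions above now funnel through one nanosecond-based helper so they round identically: whole seconds convert exactly and any nanosecond remainder is rounded up to the next tick. The sketch below shows that rounding with a plain divide and an assumed HZ of 100; the kernel itself uses the scaled-multiply form visible in the hunk, so treat this only as the arithmetic idea:

#include <stdio.h>

#define HZ		100UL			/* assumed example tick rate */
#define NSEC_PER_SEC	1000000000UL
#define NSEC_PER_USEC	1000UL
#define TICK_NSEC	(NSEC_PER_SEC / HZ)	/* 10 ms per jiffy at HZ=100 */

/* Same shape as __timespec_to_jiffies(): seconds convert exactly, the
 * nanosecond remainder is rounded *up* to a whole tick. */
static unsigned long ts_to_jiffies(unsigned long sec, unsigned long nsec)
{
	return sec * HZ + (nsec + TICK_NSEC - 1) / TICK_NSEC;
}

/* timeval_to_jiffies() now just scales usec to nsec and reuses the above. */
static unsigned long tv_to_jiffies(unsigned long sec, unsigned long usec)
{
	return ts_to_jiffies(sec, usec * NSEC_PER_USEC);
}

int main(void)
{
	/* 1.5 s -> 150 jiffies; one extra microsecond costs a full tick. */
	printf("%lu %lu %lu\n",
	       ts_to_jiffies(1, 500000000UL),	/* 150 */
	       tv_to_jiffies(1, 500000UL),	/* 150 */
	       tv_to_jiffies(1, 500001UL));	/* 151 */
	return 0;
}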
diff --git a/lib/Kconfig b/lib/Kconfig
index a5ce0c7f6c30..54cf309a92a5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
config ARCH_USE_CMPXCHG_LOCKREF
bool
+config ARCH_HAS_FAST_MULTIPLIER
+ bool
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index ae146f0734eb..2404d03e251a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1723,11 +1723,13 @@ ascend_old_tree:
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
+ if (!cursor)
+ goto gc_complete;
} else {
slot = node->parent_slot;
cursor = ptr;
}
- BUG_ON(!ptr);
+ BUG_ON(!cursor);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
diff --git a/lib/hweight.c b/lib/hweight.c
index b7d81ba143d1..9a5c1f221558 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,7 +11,7 @@
unsigned int __sw_hweight32(unsigned int w)
{
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
return __sw_hweight32((unsigned int)(w >> 32)) +
__sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
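The hweight hunks only rename ARCH_HAS_FAST_MULTIPLIER to its Kconfig-driven CONFIG_ form; the underlying trick is that once every byte holds its own partial count, a single multiply by 0x01010101 sums the bytes, replacing the shift-and-add chain used on machines without a fast multiplier. Both 32-bit variants, standalone:

#include <stdio.h>
#include <stdint.h>

/* Multiplier variant: after the first three steps each byte of w holds its
 * own popcount; multiplying by 0x01010101 sums the bytes into the top byte. */
static unsigned int hweight32_mul(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
	w = (w + (w >> 4)) & 0x0f0f0f0f;
	return (w * 0x01010101) >> 24;
}

/* Fallback used when the multiply would be slower than shifts and adds. */
static unsigned int hweight32_shift(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
	w = (w + (w >> 4)) & 0x0f0f0f0f;
	w += w >> 8;
	return (w + (w >> 16)) & 0xff;
}

int main(void)
{
	uint32_t x = 0xdeadbeef;	/* 24 bits set */

	printf("%u %u\n", hweight32_mul(x), hweight32_shift(x));
	return 0;
}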
diff --git a/lib/string.c b/lib/string.c
index 992bf30af759..f3c6ff596414 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
return check_bytes8(start, value, bytes);
value64 = value;
-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
diff --git a/mm/memblock.c b/mm/memblock.c
index 70fad0c0dafb..6ecb0d937fb5 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -816,6 +816,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
if (nid != NUMA_NO_NODE && nid != m_nid)
continue;
+ /* skip hotpluggable memory regions if needed */
+ if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+ continue;
+
if (!type_b) {
if (out_start)
*out_start = m_start;
diff --git a/mm/mmap.c b/mm/mmap.c
index c1f2ea4a0b99..c0a3637cdb64 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -369,20 +369,20 @@ static int browse_rb(struct rb_root *root)
struct vm_area_struct *vma;
vma = rb_entry(nd, struct vm_area_struct, vm_rb);
if (vma->vm_start < prev) {
- pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev);
+ pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
bug = 1;
}
if (vma->vm_start < pend) {
- pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend);
+ pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
bug = 1;
}
if (vma->vm_start > vma->vm_end) {
- pr_info("vm_end %lx < vm_start %lx\n",
+ pr_emerg("vm_end %lx < vm_start %lx\n",
vma->vm_end, vma->vm_start);
bug = 1;
}
if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
- pr_info("free gap %lx, correct %lx\n",
+ pr_emerg("free gap %lx, correct %lx\n",
vma->rb_subtree_gap,
vma_compute_subtree_gap(vma));
bug = 1;
@@ -396,7 +396,7 @@ static int browse_rb(struct rb_root *root)
for (nd = pn; nd; nd = rb_prev(nd))
j++;
if (i != j) {
- pr_info("backwards %d, forwards %d\n", j, i);
+ pr_emerg("backwards %d, forwards %d\n", j, i);
bug = 1;
}
return bug ? -1 : i;
@@ -431,17 +431,17 @@ static void validate_mm(struct mm_struct *mm)
i++;
}
if (i != mm->map_count) {
- pr_info("map_count %d vm_next %d\n", mm->map_count, i);
+ pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
bug = 1;
}
if (highest_address != mm->highest_vm_end) {
- pr_info("mm->highest_vm_end %lx, found %lx\n",
+ pr_emerg("mm->highest_vm_end %lx, found %lx\n",
mm->highest_vm_end, highest_address);
bug = 1;
}
i = browse_rb(&mm->mm_rb);
if (i != mm->map_count) {
- pr_info("map_count %d rb %d\n", mm->map_count, i);
+ pr_emerg("map_count %d rb %d\n", mm->map_count, i);
bug = 1;
}
BUG_ON(bug);
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7ed58602e71b..7c7ab32ee503 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -119,6 +119,8 @@ static unsigned long __init free_low_memory_core_early(void)
phys_addr_t start, end;
u64 i;
+ memblock_clear_hotplug(0, -1);
+
for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
count += __free_memory_core(start, end);
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 3707c71ae4cd..51108165f829 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
- unsigned int cpu;
+ unsigned int cpu, tcpu;
int i;
for_each_possible_cpu(cpu) {
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
- if (!*pagep) {
- pcpu_free_pages(chunk, pages, populated,
- page_start, page_end);
- return -ENOMEM;
- }
+ if (!*pagep)
+ goto err;
}
}
return 0;
+
+err:
+ while (--i >= page_start)
+ __free_page(pages[pcpu_page_idx(cpu, i)]);
+
+ for_each_possible_cpu(tcpu) {
+ if (tcpu == cpu)
+ break;
+ for (i = page_start; i < page_end; i++)
+ __free_page(pages[pcpu_page_idx(tcpu, i)]);
+ }
+ return -ENOMEM;
}
/**
@@ -263,6 +272,7 @@ err:
__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
page_end - page_start);
}
+ pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
return err;
}
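The pcpu_alloc_pages() rework above swaps the blanket pcpu_free_pages() call on failure for an unwind that releases only what this invocation actually allocated: the partially filled CPU first, then every CPU completed before it. A userspace analogue of that rollback, with malloc standing in for page allocation and a forced failure for demonstration:

#include <stdio.h>
#include <stdlib.h>

#define NGROUPS	4
#define NITEMS	3

/* Allocate NITEMS buffers per group; on failure, unwind only what this
 * call allocated: the partial group first, then the completed ones. */
static int alloc_all(void *bufs[NGROUPS][NITEMS], int fail_at)
{
	int g, i, tg;

	for (g = 0; g < NGROUPS; g++) {
		for (i = 0; i < NITEMS; i++) {
			/* fail_at simulates an allocation failure */
			bufs[g][i] = (g * NITEMS + i == fail_at) ? NULL : malloc(64);
			if (!bufs[g][i])
				goto err;
		}
	}
	return 0;

err:
	while (--i >= 0)			/* partial group g */
		free(bufs[g][i]);
	for (tg = 0; tg < g; tg++)		/* fully allocated groups */
		for (i = 0; i < NITEMS; i++)
			free(bufs[tg][i]);
	return -1;
}

int main(void)
{
	void *bufs[NGROUPS][NITEMS];

	printf("alloc_all: %d (rolled back cleanly)\n", alloc_all(bufs, 7));
	return 0;
}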
diff --git a/mm/percpu.c b/mm/percpu.c
index 2139e30a4b44..da997f9800bd 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1932,6 +1932,8 @@ void __init setup_per_cpu_areas(void)
if (pcpu_setup_first_chunk(ai, fc) < 0)
panic("Failed to initialize percpu areas.");
+
+ pcpu_free_alloc_info(ai);
}
#endif /* CONFIG_SMP */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b50dabb3f86a..faff6247ac8f 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -589,6 +589,14 @@ EXPORT_SYMBOL(hci_get_route);
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
struct hci_dev *hdev = conn->hdev;
+ struct hci_conn_params *params;
+
+ params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
+ conn->dst_type);
+ if (params && params->conn) {
+ hci_conn_drop(params->conn);
+ params->conn = NULL;
+ }
conn->state = BT_CLOSED;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index c32d361c0cf7..1d9c29a00568 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2536,8 +2536,13 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
struct hci_conn_params *p;
- list_for_each_entry(p, &hdev->le_conn_params, list)
+ list_for_each_entry(p, &hdev->le_conn_params, list) {
+ if (p->conn) {
+ hci_conn_drop(p->conn);
+ p->conn = NULL;
+ }
list_del_init(&p->action);
+ }
BT_DBG("All LE pending actions cleared");
}
@@ -2578,8 +2583,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hci_dev_lock(hdev);
hci_inquiry_cache_flush(hdev);
- hci_conn_hash_flush(hdev);
hci_pend_le_actions_clear(hdev);
+ hci_conn_hash_flush(hdev);
hci_dev_unlock(hdev);
hci_notify(hdev, HCI_DEV_DOWN);
@@ -3727,6 +3732,9 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
if (!params)
return;
+ if (params->conn)
+ hci_conn_drop(params->conn);
+
list_del(&params->action);
list_del(&params->list);
kfree(params);
@@ -3757,6 +3765,8 @@ void hci_conn_params_clear_all(struct hci_dev *hdev)
struct hci_conn_params *params, *tmp;
list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
+ if (params->conn)
+ hci_conn_drop(params->conn);
list_del(&params->action);
list_del(&params->list);
kfree(params);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index be35598984d9..a6000823f0ff 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -4221,8 +4221,13 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_proto_connect_cfm(conn, ev->status);
params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
- if (params)
+ if (params) {
list_del_init(&params->action);
+ if (params->conn) {
+ hci_conn_drop(params->conn);
+ params->conn = NULL;
+ }
+ }
unlock:
hci_update_background_scan(hdev);
@@ -4304,8 +4309,16 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
- if (!IS_ERR(conn))
+ if (!IS_ERR(conn)) {
+ /* Store the pointer since we don't really have any
+ * other owner of the object besides the params that
+ * triggered it. This way we can abort the connection if
+ * the parameters get removed and keep the reference
+ * count consistent once the connection is established.
+ */
+ params->conn = conn;
return;
+ }
switch (PTR_ERR(conn)) {
case -EBUSY:
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 96238ba95f2b..de6662b14e1f 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -13,8 +13,6 @@
#include "auth_x.h"
#include "auth_x_protocol.h"
-#define TEMP_TICKET_BUF_LEN 256
-
static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
}
static int ceph_x_decrypt(struct ceph_crypto_key *secret,
- void **p, void *end, void *obuf, size_t olen)
+ void **p, void *end, void **obuf, size_t olen)
{
struct ceph_x_encrypt_header head;
size_t head_len = sizeof(head);
@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
return -EINVAL;
dout("ceph_x_decrypt len %d\n", len);
- ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
- *p, len);
+ if (*obuf == NULL) {
+ *obuf = kmalloc(len, GFP_NOFS);
+ if (!*obuf)
+ return -ENOMEM;
+ olen = len;
+ }
+
+ ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
if (ret)
return ret;
if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
kfree(th);
}
-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
- struct ceph_crypto_key *secret,
- void *buf, void *end)
+static int process_one_ticket(struct ceph_auth_client *ac,
+ struct ceph_crypto_key *secret,
+ void **p, void *end)
{
struct ceph_x_info *xi = ac->private;
- int num;
- void *p = buf;
+ int type;
+ u8 tkt_struct_v, blob_struct_v;
+ struct ceph_x_ticket_handler *th;
+ void *dbuf = NULL;
+ void *dp, *dend;
+ int dlen;
+ char is_enc;
+ struct timespec validity;
+ struct ceph_crypto_key old_key;
+ void *ticket_buf = NULL;
+ void *tp, *tpend;
+ struct ceph_timespec new_validity;
+ struct ceph_crypto_key new_session_key;
+ struct ceph_buffer *new_ticket_blob;
+ unsigned long new_expires, new_renew_after;
+ u64 new_secret_id;
int ret;
- char *dbuf;
- char *ticket_buf;
- u8 reply_struct_v;
- dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
- if (!dbuf)
- return -ENOMEM;
+ ceph_decode_need(p, end, sizeof(u32) + 1, bad);
- ret = -ENOMEM;
- ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
- if (!ticket_buf)
- goto out_dbuf;
+ type = ceph_decode_32(p);
+ dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
- ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
- reply_struct_v = ceph_decode_8(&p);
- if (reply_struct_v != 1)
+ tkt_struct_v = ceph_decode_8(p);
+ if (tkt_struct_v != 1)
goto bad;
- num = ceph_decode_32(&p);
- dout("%d tickets\n", num);
- while (num--) {
- int type;
- u8 tkt_struct_v, blob_struct_v;
- struct ceph_x_ticket_handler *th;
- void *dp, *dend;
- int dlen;
- char is_enc;
- struct timespec validity;
- struct ceph_crypto_key old_key;
- void *tp, *tpend;
- struct ceph_timespec new_validity;
- struct ceph_crypto_key new_session_key;
- struct ceph_buffer *new_ticket_blob;
- unsigned long new_expires, new_renew_after;
- u64 new_secret_id;
-
- ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
-
- type = ceph_decode_32(&p);
- dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
-
- tkt_struct_v = ceph_decode_8(&p);
- if (tkt_struct_v != 1)
- goto bad;
-
- th = get_ticket_handler(ac, type);
- if (IS_ERR(th)) {
- ret = PTR_ERR(th);
- goto out;
- }
- /* blob for me */
- dlen = ceph_x_decrypt(secret, &p, end, dbuf,
- TEMP_TICKET_BUF_LEN);
- if (dlen <= 0) {
- ret = dlen;
- goto out;
- }
- dout(" decrypted %d bytes\n", dlen);
- dend = dbuf + dlen;
- dp = dbuf;
+ th = get_ticket_handler(ac, type);
+ if (IS_ERR(th)) {
+ ret = PTR_ERR(th);
+ goto out;
+ }
- tkt_struct_v = ceph_decode_8(&dp);
- if (tkt_struct_v != 1)
- goto bad;
+ /* blob for me */
+ dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
+ if (dlen <= 0) {
+ ret = dlen;
+ goto out;
+ }
+ dout(" decrypted %d bytes\n", dlen);
+ dp = dbuf;
+ dend = dp + dlen;
- memcpy(&old_key, &th->session_key, sizeof(old_key));
- ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
- if (ret)
- goto out;
+ tkt_struct_v = ceph_decode_8(&dp);
+ if (tkt_struct_v != 1)
+ goto bad;
- ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
- ceph_decode_timespec(&validity, &new_validity);
- new_expires = get_seconds() + validity.tv_sec;
- new_renew_after = new_expires - (validity.tv_sec / 4);
- dout(" expires=%lu renew_after=%lu\n", new_expires,
- new_renew_after);
+ memcpy(&old_key, &th->session_key, sizeof(old_key));
+ ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+ if (ret)
+ goto out;
- /* ticket blob for service */
- ceph_decode_8_safe(&p, end, is_enc, bad);
- tp = ticket_buf;
- if (is_enc) {
- /* encrypted */
- dout(" encrypted ticket\n");
- dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
- TEMP_TICKET_BUF_LEN);
- if (dlen < 0) {
- ret = dlen;
- goto out;
- }
- dlen = ceph_decode_32(&tp);
- } else {
- /* unencrypted */
- ceph_decode_32_safe(&p, end, dlen, bad);
- ceph_decode_need(&p, end, dlen, bad);
- ceph_decode_copy(&p, ticket_buf, dlen);
+ ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+ ceph_decode_timespec(&validity, &new_validity);
+ new_expires = get_seconds() + validity.tv_sec;
+ new_renew_after = new_expires - (validity.tv_sec / 4);
+ dout(" expires=%lu renew_after=%lu\n", new_expires,
+ new_renew_after);
+
+ /* ticket blob for service */
+ ceph_decode_8_safe(p, end, is_enc, bad);
+ if (is_enc) {
+ /* encrypted */
+ dout(" encrypted ticket\n");
+ dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out;
}
- tpend = tp + dlen;
- dout(" ticket blob is %d bytes\n", dlen);
- ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
- blob_struct_v = ceph_decode_8(&tp);
- new_secret_id = ceph_decode_64(&tp);
- ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
- if (ret)
+ tp = ticket_buf;
+ dlen = ceph_decode_32(&tp);
+ } else {
+ /* unencrypted */
+ ceph_decode_32_safe(p, end, dlen, bad);
+ ticket_buf = kmalloc(dlen, GFP_NOFS);
+ if (!ticket_buf) {
+ ret = -ENOMEM;
goto out;
-
- /* all is well, update our ticket */
- ceph_crypto_key_destroy(&th->session_key);
- if (th->ticket_blob)
- ceph_buffer_put(th->ticket_blob);
- th->session_key = new_session_key;
- th->ticket_blob = new_ticket_blob;
- th->validity = new_validity;
- th->secret_id = new_secret_id;
- th->expires = new_expires;
- th->renew_after = new_renew_after;
- dout(" got ticket service %d (%s) secret_id %lld len %d\n",
- type, ceph_entity_type_name(type), th->secret_id,
- (int)th->ticket_blob->vec.iov_len);
- xi->have_keys |= th->service;
+ }
+ tp = ticket_buf;
+ ceph_decode_need(p, end, dlen, bad);
+ ceph_decode_copy(p, ticket_buf, dlen);
}
+ tpend = tp + dlen;
+ dout(" ticket blob is %d bytes\n", dlen);
+ ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
+ blob_struct_v = ceph_decode_8(&tp);
+ new_secret_id = ceph_decode_64(&tp);
+ ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+ if (ret)
+ goto out;
+
+ /* all is well, update our ticket */
+ ceph_crypto_key_destroy(&th->session_key);
+ if (th->ticket_blob)
+ ceph_buffer_put(th->ticket_blob);
+ th->session_key = new_session_key;
+ th->ticket_blob = new_ticket_blob;
+ th->validity = new_validity;
+ th->secret_id = new_secret_id;
+ th->expires = new_expires;
+ th->renew_after = new_renew_after;
+ dout(" got ticket service %d (%s) secret_id %lld len %d\n",
+ type, ceph_entity_type_name(type), th->secret_id,
+ (int)th->ticket_blob->vec.iov_len);
+ xi->have_keys |= th->service;
- ret = 0;
out:
kfree(ticket_buf);
-out_dbuf:
kfree(dbuf);
return ret;
@@ -270,6 +255,34 @@ bad:
goto out;
}
+static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+ struct ceph_crypto_key *secret,
+ void *buf, void *end)
+{
+ void *p = buf;
+ u8 reply_struct_v;
+ u32 num;
+ int ret;
+
+ ceph_decode_8_safe(&p, end, reply_struct_v, bad);
+ if (reply_struct_v != 1)
+ return -EINVAL;
+
+ ceph_decode_32_safe(&p, end, num, bad);
+ dout("%d tickets\n", num);
+
+ while (num--) {
+ ret = process_one_ticket(ac, secret, &p, end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+
+bad:
+ return -EINVAL;
+}
+
static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
struct ceph_x_ticket_handler *th,
struct ceph_x_authorizer *au)
@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
struct ceph_x_ticket_handler *th;
int ret = 0;
struct ceph_x_authorize_reply reply;
+ void *preply = &reply;
void *p = au->reply_buf;
void *end = p + sizeof(au->reply_buf);
th = get_ticket_handler(ac, au->service);
if (IS_ERR(th))
return PTR_ERR(th);
- ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
+ ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
if (ret < 0)
return ret;
if (ret != sizeof(reply))
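ceph_x_decrypt() now takes void **obuf and sizes and allocates the output buffer itself whenever the caller passes in NULL, which is what lets the ticket paths drop the fixed TEMP_TICKET_BUF_LEN buffers; callers with their own storage, like the authorizer-reply path just above (via preply), keep passing it. A small standalone sketch of that calling convention, with illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy `len` bytes into *obuf. If *obuf is NULL the callee allocates a
 * buffer of exactly `len` bytes and hands ownership back to the caller;
 * otherwise the caller's buffer (of size olen) is used. */
static int produce(const void *src, size_t len, void **obuf, size_t olen)
{
	if (*obuf == NULL) {
		*obuf = malloc(len);
		if (!*obuf)
			return -1;
	} else if (olen < len) {
		return -1;		/* caller's buffer too small */
	}
	memcpy(*obuf, src, len);
	return (int)len;
}

int main(void)
{
	const char msg[] = "ticket blob";
	char stack_buf[32];
	void *fixed = stack_buf, *dynamic = NULL;

	/* Caller-supplied storage, like the authorizer-reply path. */
	printf("fixed: %d bytes\n",
	       produce(msg, sizeof(msg), &fixed, sizeof(stack_buf)));

	/* NULL in: callee sizes and allocates the buffer, like the tickets. */
	printf("dynamic: %d bytes (%s)\n",
	       produce(msg, sizeof(msg), &dynamic, 0), (char *)dynamic);
	free(dynamic);
	return 0;
}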
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 067d3af2eaf6..61fcfc304f68 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1181,7 +1181,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
if (!m) {
pr_info("alloc_msg unknown type %d\n", type);
*skip = 1;
+ } else if (front_len > m->front_alloc_len) {
+ pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
+ front_len, m->front_alloc_len,
+ (unsigned int)con->peer_name.type,
+ le64_to_cpu(con->peer_name.num));
+ ceph_msg_put(m);
+ m = ceph_msg_new(type, front_len, GFP_NOFS, false);
}
+
return m;
}
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 488dd1a825c0..fdbc9a81d4c2 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -775,7 +775,7 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
EXPORT_SYMBOL(__skb_checksum_complete);
/**
- * skb_copy_and_csum_datagram_iovec - Copy and checkum skb to user iovec.
+ * skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
* @skb: skbuff
* @hlen: hardware length
* @iov: io vector
diff --git a/net/core/dev.c b/net/core/dev.c
index b65a5051361f..ab9a16530c36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2587,13 +2587,19 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
return harmonize_features(skb, features);
}
- features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
+ features = netdev_intersect_features(features,
+ skb->dev->vlan_features |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
- features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
- NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
+ features = netdev_intersect_features(features,
+ NETIF_F_SG |
+ NETIF_F_HIGHDMA |
+ NETIF_F_FRAGLIST |
+ NETIF_F_GEN_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
return harmonize_features(skb, features);
}
@@ -4889,7 +4895,8 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
if (adj->master)
sysfs_remove_link(&(dev->dev.kobj), "master");
- if (netdev_adjacent_is_neigh_list(dev, dev_list))
+ if (netdev_adjacent_is_neigh_list(dev, dev_list) &&
+ net_eq(dev_net(dev),dev_net(adj_dev)))
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
list_del_rcu(&adj->list);
@@ -5159,11 +5166,65 @@ void netdev_upper_dev_unlink(struct net_device *dev,
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
+void netdev_adjacent_add_links(struct net_device *dev)
+{
+ struct netdev_adjacent *iter;
+
+ struct net *net = dev_net(dev);
+
+ list_for_each_entry(iter, &dev->adj_list.upper, list) {
+ if (!net_eq(net,dev_net(iter->dev)))
+ continue;
+ netdev_adjacent_sysfs_add(iter->dev, dev,
+ &iter->dev->adj_list.lower);
+ netdev_adjacent_sysfs_add(dev, iter->dev,
+ &dev->adj_list.upper);
+ }
+
+ list_for_each_entry(iter, &dev->adj_list.lower, list) {
+ if (!net_eq(net,dev_net(iter->dev)))
+ continue;
+ netdev_adjacent_sysfs_add(iter->dev, dev,
+ &iter->dev->adj_list.upper);
+ netdev_adjacent_sysfs_add(dev, iter->dev,
+ &dev->adj_list.lower);
+ }
+}
+
+void netdev_adjacent_del_links(struct net_device *dev)
+{
+ struct netdev_adjacent *iter;
+
+ struct net *net = dev_net(dev);
+
+ list_for_each_entry(iter, &dev->adj_list.upper, list) {
+ if (!net_eq(net,dev_net(iter->dev)))
+ continue;
+ netdev_adjacent_sysfs_del(iter->dev, dev->name,
+ &iter->dev->adj_list.lower);
+ netdev_adjacent_sysfs_del(dev, iter->dev->name,
+ &dev->adj_list.upper);
+ }
+
+ list_for_each_entry(iter, &dev->adj_list.lower, list) {
+ if (!net_eq(net,dev_net(iter->dev)))
+ continue;
+ netdev_adjacent_sysfs_del(iter->dev, dev->name,
+ &iter->dev->adj_list.upper);
+ netdev_adjacent_sysfs_del(dev, iter->dev->name,
+ &dev->adj_list.lower);
+ }
+}
+
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
struct netdev_adjacent *iter;
+ struct net *net = dev_net(dev);
+
list_for_each_entry(iter, &dev->adj_list.upper, list) {
+ if (!net_eq(net,dev_net(iter->dev)))
+ continue;
netdev_adjacent_sysfs_del(iter->dev, oldname,
&iter->dev->adj_list.lower);
netdev_adjacent_sysfs_add(iter->dev, dev,
@@ -5171,6 +5232,8 @@ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
}
list_for_each_entry(iter, &dev->adj_list.lower, list) {
+ if (!net_eq(net,dev_net(iter->dev)))
+ continue;
netdev_adjacent_sysfs_del(iter->dev, oldname,
&iter->dev->adj_list.upper);
netdev_adjacent_sysfs_add(iter->dev, dev,
@@ -6773,6 +6836,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* Send a netdev-removed uevent to the old namespace */
kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
+ netdev_adjacent_del_links(dev);
/* Actually switch the network namespace */
dev_net_set(dev, net);
@@ -6787,6 +6851,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* Send a netdev-add uevent to the new namespace */
kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
+ netdev_adjacent_add_links(dev);
/* Fixup kobjects */
err = device_rename(&dev->dev, dev->name);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 6b5b6e7013ca..9d33dfffca19 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -197,7 +197,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
* as destination. A new timer with the interval specified in the
* configuration TLV is created. Upon each interval, the latest statistics
* will be read from &bstats and the estimated rate will be stored in
- * &rate_est with the statistics lock grabed during this period.
+ * &rate_est with the statistics lock grabbed during this period.
*
* Returns 0 on success or a negative error code.
*
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 9d3d9e78397b..2ddbce4cce14 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -206,7 +206,7 @@ EXPORT_SYMBOL(gnet_stats_copy_queue);
* @st: application specific statistics data
* @len: length of data
*
- * Appends the application sepecific statistics to the top level TLV created by
+ * Appends the application specific statistics to the top level TLV created by
* gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
* handle is in backward compatibility mode.
*
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 163b673f9e62..da1378a3e2c7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2647,7 +2647,7 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
* skb_seq_read() will return the remaining part of the block.
*
* Note 1: The size of each block of data returned can be arbitrary,
- * this limitation is the cost for zerocopy seqeuental
+ * this limitation is the cost for zerocopy sequential
* reads of potentially non linear data.
*
* Note 2: Fragment lists within fragments are not implemented
@@ -2781,7 +2781,7 @@ EXPORT_SYMBOL(skb_find_text);
/**
* skb_append_datato_frags - append the user data to a skb
* @sk: sock structure
- * @skb: skb structure to be appened with user data.
+ * @skb: skb structure to be appended with user data.
* @getfrag: call back function to be used for getting the user data
* @from: pointer to user message iov
* @length: length of the iov message
diff --git a/net/core/sock.c b/net/core/sock.c
index 2714811afbd8..d372b4bd3f99 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -166,7 +166,7 @@ EXPORT_SYMBOL(sk_ns_capable);
/**
* sk_capable - Socket global capability test
* @sk: Socket to use a capability on or through
- * @cap: The global capbility to use
+ * @cap: The global capability to use
*
* Test to see if the opener of the socket had when the socket was
* created and the current process has the capability @cap in all user
@@ -183,7 +183,7 @@ EXPORT_SYMBOL(sk_capable);
* @sk: Socket to use a capability on or through
* @cap: The capability to use
*
- * Test to see if the opener of the socket had when the socke was created
+ * Test to see if the opener of the socket had when the socket was created
* and the current process has the capability @cap over the network namespace
* the socket is a member of.
*/
@@ -1822,6 +1822,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
order);
if (page)
goto fill_page;
+ /* Do not retry other high order allocations */
+ order = 1;
+ max_page_order = 0;
}
order--;
}
@@ -1869,10 +1872,8 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
* no guarantee that allocations succeed. Therefore, @sz MUST be
* less or equal than PAGE_SIZE.
*/
-bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
- int order;
-
if (pfrag->page) {
if (atomic_read(&pfrag->page->_count) == 1) {
pfrag->offset = 0;
@@ -1883,20 +1884,21 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
put_page(pfrag->page);
}
- order = SKB_FRAG_PAGE_ORDER;
- do {
- gfp_t gfp = prio;
-
- if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
- pfrag->page = alloc_pages(gfp, order);
+ pfrag->offset = 0;
+ if (SKB_FRAG_PAGE_ORDER) {
+ pfrag->page = alloc_pages(gfp | __GFP_COMP |
+ __GFP_NOWARN | __GFP_NORETRY,
+ SKB_FRAG_PAGE_ORDER);
if (likely(pfrag->page)) {
- pfrag->offset = 0;
- pfrag->size = PAGE_SIZE << order;
+ pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
return true;
}
- } while (--order >= 0);
-
+ }
+ pfrag->page = alloc_page(gfp);
+ if (likely(pfrag->page)) {
+ pfrag->size = PAGE_SIZE;
+ return true;
+ }
return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);
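skb_page_frag_refill() above now makes a single opportunistic attempt at the high-order allocation (already flagged __GFP_NOWARN | __GFP_NORETRY) and then falls back straight to one page, rather than walking every intermediate order. A userspace sketch of that try-big-once-then-settle shape; malloc stands in for the page allocator and the failure is simulated:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define FRAG_ORDER	3		/* assumed: try a 32 KiB block first */

struct frag {
	void *page;
	size_t size;
};

/* Try one big allocation; if that fails, fall back straight to PAGE_SIZE
 * instead of retrying every intermediate size. */
static int frag_refill(struct frag *f, int pretend_big_fails)
{
	if (!pretend_big_fails) {
		f->page = malloc(PAGE_SIZE << FRAG_ORDER);
		if (f->page) {
			f->size = PAGE_SIZE << FRAG_ORDER;
			return 1;
		}
	}
	f->page = malloc(PAGE_SIZE);
	if (f->page) {
		f->size = PAGE_SIZE;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct frag f;

	if (frag_refill(&f, 1))		/* simulate the big allocation failing */
		printf("got %zu bytes\n", f.size);
	free(f.page);
	return 0;
}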
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
index 016b77ee88f0..6591d27e53a4 100644
--- a/net/ieee802154/6lowpan_rtnl.c
+++ b/net/ieee802154/6lowpan_rtnl.c
@@ -246,7 +246,7 @@ lowpan_alloc_frag(struct sk_buff *skb, int size,
return ERR_PTR(-rc);
}
} else {
- frag = ERR_PTR(ENOMEM);
+ frag = ERR_PTR(-ENOMEM);
}
return frag;
@@ -437,7 +437,7 @@ static void lowpan_setup(struct net_device *dev)
/* Frame Control + Sequence Number + Address fields + Security Header */
dev->hard_header_len = 2 + 1 + 20 + 14;
dev->needed_tailroom = 2; /* FCS */
- dev->mtu = 1281;
+ dev->mtu = IPV6_MIN_MTU;
dev->tx_queue_len = 0;
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = 0;
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
index ffec6ce51005..32755cb7e64e 100644
--- a/net/ieee802154/reassembly.c
+++ b/net/ieee802154/reassembly.c
@@ -355,8 +355,6 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
struct net *net = dev_net(skb->dev);
struct lowpan_frag_info *frag_info = lowpan_cb(skb);
struct ieee802154_addr source, dest;
- struct netns_ieee802154_lowpan *ieee802154_lowpan =
- net_ieee802154_lowpan(net);
int err;
source = mac_cb(skb)->source;
@@ -366,8 +364,10 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
if (err < 0)
goto err;
- if (frag_info->d_size > ieee802154_lowpan->max_dsize)
+ if (frag_info->d_size > IPV6_MIN_MTU) {
+ net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
goto err;
+ }
fq = fq_find(net, frag_info, &source, &dest);
if (fq != NULL) {
@@ -415,13 +415,6 @@ static struct ctl_table lowpan_frags_ns_ctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {
- .procname = "6lowpanfrag_max_datagram_size",
- .data = &init_net.ieee802154_lowpan.max_dsize,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
{ }
};
@@ -458,7 +451,6 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
table[1].data = &ieee802154_lowpan->frags.low_thresh;
table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
table[2].data = &ieee802154_lowpan->frags.timeout;
- table[3].data = &ieee802154_lowpan->max_dsize;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
@@ -533,7 +525,6 @@ static int __net_init lowpan_frags_init_net(struct net *net)
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
- ieee802154_lowpan->max_dsize = 0xFFFF;
inet_frags_init_net(&ieee802154_lowpan->frags);
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index fb173126f03d..7cbcaf4f0194 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -82,6 +82,52 @@ config NF_TABLES_ARP
help
This option enables the ARP support for nf_tables.
+config NF_NAT_IPV4
+ tristate "IPv4 NAT"
+ depends on NF_CONNTRACK_IPV4
+ default m if NETFILTER_ADVANCED=n
+ select NF_NAT
+ help
+ The IPv4 NAT option allows masquerading, port forwarding and other
+ forms of full Network Address Port Translation. This can be
+ controlled by iptables or nft.
+
+if NF_NAT_IPV4
+
+config NF_NAT_SNMP_BASIC
+ tristate "Basic SNMP-ALG support"
+ depends on NF_CONNTRACK_SNMP
+ depends on NETFILTER_ADVANCED
+ default NF_NAT && NF_CONNTRACK_SNMP
+ ---help---
+
+ This module implements an Application Layer Gateway (ALG) for
+ SNMP payloads. In conjunction with NAT, it allows a network
+ management system to access multiple private networks with
+ conflicting addresses. It works by modifying IP addresses
+ inside SNMP payloads to match IP-layer NAT mapping.
+
+ This is the "basic" form of SNMP-ALG, as described in RFC 2962
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NF_NAT_PROTO_GRE
+ tristate
+ depends on NF_CT_PROTO_GRE
+
+config NF_NAT_PPTP
+ tristate
+ depends on NF_CONNTRACK
+ default NF_CONNTRACK_PPTP
+ select NF_NAT_PROTO_GRE
+
+config NF_NAT_H323
+ tristate
+ depends on NF_CONNTRACK
+ default NF_CONNTRACK_H323
+
+endif # NF_NAT_IPV4
+
config IP_NF_IPTABLES
tristate "IP tables support (required for filtering/masq/NAT)"
default m if NETFILTER_ADVANCED=n
@@ -170,19 +216,21 @@ config IP_NF_TARGET_SYNPROXY
To compile it as a module, choose M here. If unsure, say N.
# NAT + specific targets: nf_conntrack
-config NF_NAT_IPV4
- tristate "IPv4 NAT"
+config IP_NF_NAT
+ tristate "iptables NAT support"
depends on NF_CONNTRACK_IPV4
default m if NETFILTER_ADVANCED=n
select NF_NAT
+ select NF_NAT_IPV4
+ select NETFILTER_XT_NAT
help
- The IPv4 NAT option allows masquerading, port forwarding and other
- forms of full Network Address Port Translation. It is controlled by
- the `nat' table in iptables: see the man page for iptables(8).
+ This enables the `nat' table in iptables. This allows masquerading,
+ port forwarding and other forms of full Network Address Port
+ Translation.
To compile it as a module, choose M here. If unsure, say N.
-if NF_NAT_IPV4
+if IP_NF_NAT
config IP_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
@@ -214,47 +262,7 @@ config IP_NF_TARGET_REDIRECT
(e.g. when running oldconfig). It selects
CONFIG_NETFILTER_XT_TARGET_REDIRECT.
-endif
-
-config NF_NAT_SNMP_BASIC
- tristate "Basic SNMP-ALG support"
- depends on NF_CONNTRACK_SNMP && NF_NAT_IPV4
- depends on NETFILTER_ADVANCED
- default NF_NAT && NF_CONNTRACK_SNMP
- ---help---
-
- This module implements an Application Layer Gateway (ALG) for
- SNMP payloads. In conjunction with NAT, it allows a network
- management system to access multiple private networks with
- conflicting addresses. It works by modifying IP addresses
- inside SNMP payloads to match IP-layer NAT mapping.
-
- This is the "basic" form of SNMP-ALG, as described in RFC 2962
-
- To compile it as a module, choose M here. If unsure, say N.
-
-# If they want FTP, set to $CONFIG_IP_NF_NAT (m or y),
-# or $CONFIG_IP_NF_FTP (m or y), whichever is weaker.
-# From kconfig-language.txt:
-#
-# <expr> '&&' <expr> (6)
-#
-# (6) Returns the result of min(/expr/, /expr/).
-
-config NF_NAT_PROTO_GRE
- tristate
- depends on NF_NAT_IPV4 && NF_CT_PROTO_GRE
-
-config NF_NAT_PPTP
- tristate
- depends on NF_CONNTRACK && NF_NAT_IPV4
- default NF_NAT_IPV4 && NF_CONNTRACK_PPTP
- select NF_NAT_PROTO_GRE
-
-config NF_NAT_H323
- tristate
- depends on NF_CONNTRACK && NF_NAT_IPV4
- default NF_NAT_IPV4 && NF_CONNTRACK_H323
+endif # IP_NF_NAT
# mangle + specific targets
config IP_NF_MANGLE
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 33001621465b..edf4af32e9f2 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
# the three instances of ip_tables
obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
-obj-$(CONFIG_NF_NAT_IPV4) += iptable_nat.o
+obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 0b239fc1816e..fc1fac2a0528 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1690,14 +1690,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
addrconf_mod_dad_work(ifp, 0);
}
-/* Join to solicited addr multicast group. */
-
+/* Join to solicited addr multicast group.
+ * caller must hold RTNL */
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
{
struct in6_addr maddr;
- ASSERT_RTNL();
-
if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
@@ -1705,12 +1703,11 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
ipv6_dev_mc_inc(dev, &maddr);
}
+/* caller must hold RTNL */
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct in6_addr maddr;
- ASSERT_RTNL();
-
if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
@@ -1718,12 +1715,11 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
__ipv6_dev_mc_dec(idev, &maddr);
}
+/* caller must hold RTNL */
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
- ASSERT_RTNL();
-
if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -1732,12 +1728,11 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
ipv6_dev_ac_inc(ifp->idev->dev, &addr);
}
+/* caller must hold RTNL */
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
- ASSERT_RTNL();
-
if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -4773,15 +4768,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
addrconf_leave_solict(ifp->idev, &ifp->addr);
if (!ipv6_addr_any(&ifp->peer_addr)) {
struct rt6_info *rt;
- struct net_device *dev = ifp->idev->dev;
-
- rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
- dev->ifindex, 1);
- if (rt) {
- dst_hold(&rt->dst);
- if (ip6_del_rt(rt))
- dst_free(&rt->dst);
- }
+
+ rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
+ ifp->idev->dev, 0, 0);
+ if (rt && ip6_del_rt(rt))
+ dst_free(&rt->dst);
}
dst_hold(&ifp->rt->dst);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 210183244689..ff2de7d9d8e6 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -77,6 +77,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
pac->acl_next = NULL;
pac->acl_addr = *addr;
+ rtnl_lock();
rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;
@@ -137,6 +138,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
error:
rcu_read_unlock();
+ rtnl_unlock();
if (pac)
sock_kfree_s(sk, pac, sizeof(*pac));
return err;
@@ -171,11 +173,13 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
spin_unlock_bh(&ipv6_sk_ac_lock);
+ rtnl_lock();
rcu_read_lock();
dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
if (dev)
ipv6_dev_ac_dec(dev, &pac->acl_addr);
rcu_read_unlock();
+ rtnl_unlock();
sock_kfree_s(sk, pac, sizeof(*pac));
return 0;
@@ -198,6 +202,7 @@ void ipv6_sock_ac_close(struct sock *sk)
spin_unlock_bh(&ipv6_sk_ac_lock);
prev_index = 0;
+ rtnl_lock();
rcu_read_lock();
while (pac) {
struct ipv6_ac_socklist *next = pac->acl_next;
@@ -212,6 +217,7 @@ void ipv6_sock_ac_close(struct sock *sk)
pac = next;
}
rcu_read_unlock();
+ rtnl_unlock();
}
static void aca_put(struct ifacaddr6 *ac)
@@ -233,6 +239,8 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
struct rt6_info *rt;
int err;
+ ASSERT_RTNL();
+
idev = in6_dev_get(dev);
if (idev == NULL)
@@ -302,6 +310,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct ifacaddr6 *aca, *prev_aca;
+ ASSERT_RTNL();
+
write_lock_bh(&idev->lock);
prev_aca = NULL;
for (aca = idev->ac_list; aca; aca = aca->aca_next) {
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 617f0958e164..a23b655a7627 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -172,6 +172,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
mc_lst->next = NULL;
mc_lst->addr = *addr;
+ rtnl_lock();
rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;
@@ -185,6 +186,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
if (dev == NULL) {
rcu_read_unlock();
+ rtnl_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return -ENODEV;
}
@@ -202,6 +204,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
if (err) {
rcu_read_unlock();
+ rtnl_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return err;
}
@@ -212,6 +215,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
spin_unlock(&ipv6_sk_mc_lock);
rcu_read_unlock();
+ rtnl_unlock();
return 0;
}
@@ -229,6 +233,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
if (!ipv6_addr_is_multicast(addr))
return -EINVAL;
+ rtnl_lock();
spin_lock(&ipv6_sk_mc_lock);
for (lnk = &np->ipv6_mc_list;
(mc_lst = rcu_dereference_protected(*lnk,
@@ -252,12 +257,15 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
} else
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
rcu_read_unlock();
+ rtnl_unlock();
+
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
kfree_rcu(mc_lst, rcu);
return 0;
}
}
spin_unlock(&ipv6_sk_mc_lock);
+ rtnl_unlock();
return -EADDRNOTAVAIL;
}
@@ -302,6 +310,7 @@ void ipv6_sock_mc_close(struct sock *sk)
if (!rcu_access_pointer(np->ipv6_mc_list))
return;
+ rtnl_lock();
spin_lock(&ipv6_sk_mc_lock);
while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
@@ -328,6 +337,7 @@ void ipv6_sock_mc_close(struct sock *sk)
spin_lock(&ipv6_sk_mc_lock);
}
spin_unlock(&ipv6_sk_mc_lock);
+ rtnl_unlock();
}
int ip6_mc_source(int add, int omode, struct sock *sk,
@@ -845,6 +855,8 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
struct ifmcaddr6 *mc;
struct inet6_dev *idev;
+ ASSERT_RTNL();
+
/* we need to take a reference on idev */
idev = in6_dev_get(dev);
@@ -916,6 +928,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct ifmcaddr6 *ma, **map;
+ ASSERT_RTNL();
+
write_lock_bh(&idev->lock);
for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
if (ipv6_addr_equal(&ma->mca_addr, addr)) {
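The anycast and multicast changes above follow one pattern: the socket-level entry points (ipv6_sock_ac_join() and friends) take the RTNL, while the device-level helpers only assert that it is already held via ASSERT_RTNL(). A hedged pthread sketch of that lock-at-the-boundary, assert-in-the-helper split; all names are stand-ins:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for rtnl_lock()/ASSERT_RTNL(): the entry points take the lock,
 * the low-level helpers only assert that their caller already holds it. */
static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int cfg_lock_held;

static void cfg_lock_take(void) { pthread_mutex_lock(&cfg_lock); cfg_lock_held = 1; }
static void cfg_lock_drop(void) { cfg_lock_held = 0; pthread_mutex_unlock(&cfg_lock); }
#define ASSERT_CFG_LOCK()	assert(cfg_lock_held)

/* Device-level helper: like ipv6_dev_ac_inc(), it never takes the lock
 * itself, it just requires it. */
static void dev_add_address(const char *addr)
{
	ASSERT_CFG_LOCK();
	printf("adding %s under the config lock\n", addr);
}

/* Socket-level entry point: like ipv6_sock_ac_join(), it provides the lock. */
static void sock_join(const char *addr)
{
	cfg_lock_take();
	dev_add_address(addr);
	cfg_lock_drop();
}

int main(void)
{
	sock_join("ff02::1:ff00:1");
	return 0;
}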
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index ac93df16f5af..2812816aabdc 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -57,9 +57,19 @@ config NFT_REJECT_IPV6
config NF_LOG_IPV6
tristate "IPv6 packet logging"
- depends on NETFILTER_ADVANCED
+ default m if NETFILTER_ADVANCED=n
select NF_LOG_COMMON
+config NF_NAT_IPV6
+ tristate "IPv6 NAT"
+ depends on NF_CONNTRACK_IPV6
+ depends on NETFILTER_ADVANCED
+ select NF_NAT
+ help
+ The IPv6 NAT option allows masquerading, port forwarding and other
+ forms of full Network Address Port Translation. This can be
+ controlled by iptables or nft.
+
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6
@@ -232,19 +242,21 @@ config IP6_NF_SECURITY
If unsure, say N.
-config NF_NAT_IPV6
- tristate "IPv6 NAT"
+config IP6_NF_NAT
+ tristate "ip6tables NAT support"
depends on NF_CONNTRACK_IPV6
depends on NETFILTER_ADVANCED
select NF_NAT
+ select NF_NAT_IPV6
+ select NETFILTER_XT_NAT
help
- The IPv6 NAT option allows masquerading, port forwarding and other
- forms of full Network Address Port Translation. It is controlled by
- the `nat' table in ip6tables, see the man page for ip6tables(8).
+ This enables the `nat' table in ip6tables. This allows masquerading,
+ port forwarding and other forms of full Network Address Port
+ Translation.
To compile it as a module, choose M here. If unsure, say N.
-if NF_NAT_IPV6
+if IP6_NF_NAT
config IP6_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
@@ -265,7 +277,7 @@ config IP6_NF_TARGET_NPT
To compile it as a module, choose M here. If unsure, say N.
-endif # NF_NAT_IPV6
+endif # IP6_NF_NAT
endif # IP6_NF_IPTABLES
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index c0b263104ed2..c3d3286db4bb 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
-obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o
+obj-$(CONFIG_IP6_NF_NAT) += ip6table_nat.o
# objects for l3 independent conntrack
nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 13752d96275e..b704a9356208 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -755,7 +755,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
/* If PMTU discovery was enabled, use the MTU that was discovered */
dst = sk_dst_get(tunnel->sock);
if (dst != NULL) {
- u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
+ u32 pmtu = dst_mtu(dst);
+
if (pmtu != 0)
session->mtu = session->mru = pmtu -
PPPOL2TP_HEADER_OVERHEAD;
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 0375009ddc0d..399ad82c997f 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -541,6 +541,8 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
continue;
if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
continue;
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ continue;
if (!compat)
compat = &sdata->vif.bss_conf.chandef;
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 3db96648b45a..86173c0de40e 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -167,7 +167,7 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
p += scnprintf(p, sizeof(buf) + buf - p,
- "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
+ "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]);
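[Editor's note] Only the header string changes here; the surrounding `p += scnprintf(...)` calls use the usual append-into-a-fixed-buffer idiom. A standalone sketch of that idiom (not from the patch):

#include <linux/kernel.h>

/* scnprintf() returns the number of characters actually stored (never
 * more than the remaining size), so advancing the cursor by its return
 * value accumulates output without overrunning the buffer.
 */
static size_t example_fill(char *buf, size_t size)
{
	char *p = buf;

	p += scnprintf(p, size + buf - p, "line one: %d\n", 1);
	p += scnprintf(p, size + buf - p, "line two: %d\n", 2);
	return p - buf;		/* total length written */
}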
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 01eede7406a5..f75e5f132c5a 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1175,8 +1175,8 @@ static void ieee80211_iface_work(struct work_struct *work)
if (sta) {
u16 last_seq;
- last_seq = le16_to_cpu(
- sta->last_seq_ctrl[rx_agg->tid]);
+ last_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(
+ sta->last_seq_ctrl[rx_agg->tid]));
__ieee80211_start_rx_ba_session(sta,
0, 0,
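[Editor's note] The conversion wraps the raw seq_ctrl field in IEEE80211_SEQ_TO_SN(), which keeps only the 12-bit sequence number and discards the fragment number. Quoted from memory (treat as an approximation of include/linux/ieee80211.h of that era), the relevant definitions are roughly:

/* seq_ctrl packs a 4-bit fragment number in the low bits and the
 * 12-bit sequence number above it.
 */
#define IEEE80211_SCTL_FRAG		0x000F
#define IEEE80211_SCTL_SEQ		0xFFF0
#define IEEE80211_SEQ_TO_SN(seq)	(((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define IEEE80211_SN_TO_SEQ(sn)		(((sn) << 4) & IEEE80211_SCTL_SEQ)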
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 63b874101b27..c47194d27149 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -959,7 +959,8 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
if (!matches_local)
event = CNF_RJCT;
if (!mesh_plink_free_count(sdata) ||
- (sta->llid != llid || sta->plid != plid))
+ sta->llid != llid ||
+ (sta->plid && sta->plid != plid))
event = CNF_IGNR;
else
event = CNF_ACPT;
@@ -1080,6 +1081,10 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
goto unlock_rcu;
}
+ /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
+ if (!sta->plid && event == CNF_ACPT)
+ sta->plid = plid;
+
changed |= mesh_plink_fsm(sdata, sta, event);
unlock_rcu:
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 31a8afaf7332..b82a12a9f0f1 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4376,8 +4376,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
if (bss->wmm_used && bss->uapsd_supported &&
- (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) &&
- sdata->wmm_acm != 0xff) {
+ (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
assoc_data->uapsd = true;
ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
} else {
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index c6ee2139fbc5..441875f03750 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1094,8 +1094,11 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
unsigned long flags;
struct ps_data *ps;
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+ u.ap);
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP)
ps = &sdata->bss->ps;
else if (ieee80211_vif_is_mesh(&sdata->vif))
ps = &sdata->u.mesh.ps;
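[Editor's note] For AP_VLAN interfaces the hunk walks from the shared bss pointer back to the owning AP interface with container_of(). A generic sketch of that pattern, with placeholder types that are not mac80211's:

#include <linux/kernel.h>

/* container_of(ptr, type, member) converts a pointer to a member back
 * into a pointer to the structure that embeds it.
 */
struct example_bss {
	int dtim_period;
};

struct example_iface {
	int iftype;
	struct example_bss bss;
};

static struct example_iface *iface_of(struct example_bss *bss)
{
	return container_of(bss, struct example_iface, bss);
}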
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index 3c3069fd6971..547838822d5e 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -462,7 +462,10 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb,
skb->pkt_type = PACKET_OTHERHOST;
break;
default:
- break;
+ spin_unlock_bh(&sdata->mib_lock);
+ pr_debug("invalid dest mode\n");
+ kfree_skb(skb);
+ return NET_RX_DROP;
}
spin_unlock_bh(&sdata->mib_lock);
@@ -573,6 +576,7 @@ void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
ret = mac802154_parse_frame_start(skb, &hdr);
if (ret) {
pr_debug("got invalid frame\n");
+ kfree_skb(skb);
return;
}
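[Editor's note] Both hunks plug skb leaks on early exits: once the receive path decides not to process a frame, it owns the buffer and must free it. A minimal sketch of that convention, with a made-up validity check:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool example_frame_ok(const struct sk_buff *skb)
{
	return skb->len >= 2;	/* placeholder validity check */
}

/* On any early exit the handler still owns skb, so it frees it and
 * reports NET_RX_DROP; on the success path ownership moves on.
 */
static int example_rx(struct sk_buff *skb)
{
	if (!example_frame_ok(skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	netif_rx(skb);
	return NET_RX_SUCCESS;
}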
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index ad751fe2e82b..b5c1d3aadb41 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -499,7 +499,7 @@ config NFT_LIMIT
config NFT_NAT
depends on NF_TABLES
depends on NF_CONNTRACK
- depends on NF_NAT
+ select NF_NAT
tristate "Netfilter nf_tables nat module"
help
This option adds the "nat" expression that you can use to perform
@@ -747,7 +747,9 @@ config NETFILTER_XT_TARGET_LED
config NETFILTER_XT_TARGET_LOG
tristate "LOG target support"
- depends on NF_LOG_IPV4 && NF_LOG_IPV6
+ select NF_LOG_COMMON
+ select NF_LOG_IPV4
+ select NF_LOG_IPV6 if IPV6
default m if NETFILTER_ADVANCED=n
help
This option adds a `LOG' target, which allows you to create rules in
@@ -764,6 +766,14 @@ config NETFILTER_XT_TARGET_MARK
(e.g. when running oldconfig). It selects
CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
+config NETFILTER_XT_NAT
+ tristate '"SNAT and DNAT" targets support'
+ depends on NF_NAT
+ ---help---
+ This option enables the SNAT and DNAT targets.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_NETMAP
tristate '"NETMAP" target support'
depends on NF_NAT
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 8308624a406a..fad5fdba34e5 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -95,7 +95,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
-obj-$(CONFIG_NF_NAT) += xt_nat.o
+obj-$(CONFIG_NETFILTER_XT_NAT) += xt_nat.o
# targets
obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index a93c97f106d4..024a2e25c8a4 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -54,7 +54,7 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
EXPORT_SYMBOL(nf_hooks);
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif
@@ -72,7 +72,7 @@ int nf_register_hook(struct nf_hook_ops *reg)
}
list_add_rcu(&reg->list, elem->list.prev);
mutex_unlock(&nf_hook_mutex);
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
return 0;
@@ -84,7 +84,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
mutex_lock(&nf_hook_mutex);
list_del_rcu(&reg->list);
mutex_unlock(&nf_hook_mutex);
-#if defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
synchronize_net();
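[Editor's note] The three hunks switch the guard from CONFIG_JUMP_LABEL to HAVE_JUMP_LABEL, the macro that also reflects toolchain (asm goto) support, so the static-key calls are only compiled where jump labels actually work. A sketch of the guarded pattern, with an illustrative key name:

#include <linux/jump_label.h>

#ifdef HAVE_JUMP_LABEL
static struct static_key example_key;	/* starts out "false" */

static void example_register(void)
{
	static_key_slow_inc(&example_key);	/* patches call sites in */
}

static void example_unregister(void)
{
	static_key_slow_dec(&example_key);
}

static bool example_needed(void)
{
	return static_key_false(&example_key);	/* near-zero cost when unused */
}
#endif /* HAVE_JUMP_LABEL */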
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index e6836755c45d..5c34e8d42e01 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1906,7 +1906,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_local_reply6,
.owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST + 1,
},
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 6f70bdd3a90a..56896a412bce 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -38,6 +38,7 @@
#include <net/route.h> /* for ip_route_output */
#include <net/ipv6.h>
#include <net/ip6_route.h>
+#include <net/ip_tunnels.h>
#include <net/addrconf.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
@@ -862,11 +863,15 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
old_iph = ip_hdr(skb);
}
- skb->transport_header = skb->network_header;
-
/* fix old IP header checksum */
ip_send_check(old_iph);
+ skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+ if (IS_ERR(skb))
+ goto tx_error;
+
+ skb->transport_header = skb->network_header;
+
skb_push(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -900,7 +905,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
return NF_STOLEN;
tx_error:
- kfree_skb(skb);
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
@@ -953,6 +959,11 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
old_iph = ipv6_hdr(skb);
}
+ /* GSO: we need to provide proper SKB_GSO_ value for IPv6 */
+ skb = iptunnel_handle_offloads(skb, false, 0); /* SKB_GSO_SIT/IPV6 */
+ if (IS_ERR(skb))
+ goto tx_error;
+
skb->transport_header = skb->network_header;
skb_push(skb, sizeof(struct ipv6hdr));
@@ -988,7 +999,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
return NF_STOLEN;
tx_error:
- kfree_skb(skb);
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
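[Editor's note] iptunnel_handle_offloads() either returns a usable skb or an ERR_PTR-encoded errno after consuming the original, which is why the tx_error paths above now free the buffer only when it is not an error pointer. A condensed sketch of that convention, with a hypothetical example_prepare() standing in for the offload helper:

#include <linux/err.h>
#include <linux/skbuff.h>

static struct sk_buff *example_prepare(struct sk_buff *skb);	/* may return ERR_PTR() */

static int example_xmit(struct sk_buff *skb)
{
	int err = 0;

	skb = example_prepare(skb);	/* consumes the old skb on failure */
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		goto tx_error;
	}

	/* ... push headers and hand the skb to the stack ... */
	return 0;

tx_error:
	if (!IS_ERR(skb))
		kfree_skb(skb);		/* never kfree an ERR_PTR */
	return err;
}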
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
index f4e833005320..7198d660b4de 100644
--- a/net/netfilter/xt_cgroup.c
+++ b/net/netfilter/xt_cgroup.c
@@ -31,7 +31,7 @@ static int cgroup_mt_check(const struct xt_mtchk_param *par)
if (info->invert & ~1)
return -EINVAL;
- return info->id ? 0 : -EINVAL;
+ return 0;
}
static bool
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 7228ec3faf19..91d66b7e64ac 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -265,8 +265,11 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
upcall.key = &key;
upcall.userdata = NULL;
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
- ovs_dp_upcall(dp, skb, &upcall);
- consume_skb(skb);
+ error = ovs_dp_upcall(dp, skb, &upcall);
+ if (unlikely(error))
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
stats_counter = &stats->n_missed;
goto out;
}
@@ -404,7 +407,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
{
struct ovs_header *upcall;
struct sk_buff *nskb = NULL;
- struct sk_buff *user_skb; /* to be queued to userspace */
+ struct sk_buff *user_skb = NULL; /* to be queued to userspace */
struct nlattr *nla;
struct genl_info info = {
.dst_sk = ovs_dp_get_net(dp)->genl_sock,
@@ -494,9 +497,11 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
+ user_skb = NULL;
out:
if (err)
skb_tx_error(skb);
+ kfree_skb(user_skb);
kfree_skb(nskb);
return err;
}
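[Editor's note] Initializing user_skb to NULL and clearing it once genlmsg_unicast() has taken ownership lets the shared out: label free it unconditionally, since kfree_skb(NULL) is a no-op. The same pattern in isolation, with example_send() standing in for whatever consumes the buffer:

#include <linux/skbuff.h>

static int example_send(struct sk_buff *msg);	/* consumes msg, even on error */

static int example_queue(void)
{
	struct sk_buff *msg = NULL;
	int err;

	msg = alloc_skb(128, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	err = example_send(msg);
	msg = NULL;		/* ownership transferred; not ours to free */
out:
	kfree_skb(msg);		/* no-op when msg is NULL */
	return err;
}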
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 14c98e48f261..02a86a27fd84 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -158,6 +158,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
{ "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E39", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
+ { "BCM2E64", RFKILL_TYPE_BLUETOOTH },
{ "BCM4752", RFKILL_TYPE_GPS },
{ "LNV4752", RFKILL_TYPE_GPS },
{ },
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index eb71d49e7653..634a2abb5f3a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4243,7 +4243,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
transport = asoc->peer.primary_path;
status.sstat_assoc_id = sctp_assoc2id(asoc);
- status.sstat_state = asoc->state;
+ status.sstat_state = sctp_assoc_to_state(asoc);
status.sstat_rwnd = asoc->peer.rwnd;
status.sstat_unackdata = asoc->unack_data;
diff --git a/net/socket.c b/net/socket.c
index 95ee7d8682e7..2e2586e2dee1 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -734,8 +734,7 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
}
memset(&tss, 0, sizeof(tss));
- if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
- skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP) &&
+ if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
ktime_to_timespec_cond(skb->tstamp, tss.ts + 0))
empty = 0;
if (shhwtstamps &&
@@ -2602,7 +2601,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
*
* This function is called by a protocol handler that wants to
* advertise its address family, and have it linked into the
- * socket interface. The value ops->family coresponds to the
+ * socket interface. The value ops->family corresponds to the
* socket system call protocol family.
*/
int sock_register(const struct net_proto_family *ops)
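[Editor's note] The corrected comment describes sock_register(), which a protocol module calls to hook its address family into the socket layer. A bare-bones sketch of such a registration; AF_EXAMPLE and the create handler are placeholders, not real kernel symbols:

#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

#define AF_EXAMPLE 27	/* placeholder slot; a real module uses an assigned AF_* value */

static int example_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	return -EAFNOSUPPORT;	/* a real handler allocates and attaches a sock here */
}

static const struct net_proto_family example_family_ops = {
	.family = AF_EXAMPLE,
	.create = example_create,
	.owner  = THIS_MODULE,
};

static int __init example_init(void)
{
	return sock_register(&example_family_ops);
}

static void __exit example_exit(void)
{
	sock_unregister(AF_EXAMPLE);
}

module_init(example_init);
module_exit(example_exit);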
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index b385bcbbf2f5..4d08b398411f 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2133,7 +2133,10 @@ sub process {
# Check for improperly formed commit descriptions
if ($in_commit_log &&
$line =~ /\bcommit\s+[0-9a-f]{5,}/i &&
- $line !~ /\b[Cc]ommit [0-9a-f]{12,40} \("/) {
+ !($line =~ /\b[Cc]ommit [0-9a-f]{12,40} \("/ ||
+ ($line =~ /\b[Cc]ommit [0-9a-f]{12,40}\s*$/ &&
+ defined $rawlines[$linenr] &&
+ $rawlines[$linenr] =~ /^\s*\("/))) {
$line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i;
my $init_char = $1;
my $orig_commit = lc($2);
diff --git a/tools/usb/usbip/libsrc/usbip_common.h b/tools/usb/usbip/libsrc/usbip_common.h
index 5a0e95edf4df..15fe792e1e96 100644
--- a/tools/usb/usbip/libsrc/usbip_common.h
+++ b/tools/usb/usbip/libsrc/usbip_common.h
@@ -15,7 +15,7 @@
#include <syslog.h>
#include <unistd.h>
#include <linux/usb/ch9.h>
-#include "../../uapi/usbip.h"
+#include <linux/usbip.h>
#ifndef USBIDS_FILE
#define USBIDS_FILE "/usr/share/hwdata/usb.ids"