path: root/drivers
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2016-04-19 04:24:16 +0900
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2016-04-19 04:24:16 +0900
commit    7553c7e68fd144643e74a279ef51b74a15214071 (patch)
tree      5ad08f6de205a14eeba1793aa641e6b25c22b2c1 /drivers
parent    423de92f56bdfc3e68503af31e6ec041d59a6a25 (diff)
parent    c3b46c73264b03000d1e18b22f5caf63332547c9 (diff)
Merge 4.6-rc4 into char-misc-next
We want the fixes in there to build off of for other dependent patches.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_processor.c52
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/loop.c6
-rw-r--r--drivers/block/rbd.c6
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/bus/uniphier-system-bus.c2
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c1
-rw-r--r--drivers/clk/mediatek/reset.c2
-rw-r--r--drivers/clk/mmp/reset.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c70
-rw-r--r--drivers/clk/qcom/reset.c2
-rw-r--r--drivers/clk/qcom/reset.h2
-rw-r--r--drivers/clk/rockchip/softrst.c2
-rw-r--r--drivers/clk/sirf/clk-atlas7.c2
-rw-r--r--drivers/clk/sunxi/clk-a10-ve.c2
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c2
-rw-r--r--drivers/clk/sunxi/clk-usb.c2
-rw-r--r--drivers/clk/tegra/clk.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c3
-rw-r--r--drivers/cpufreq/intel_pstate.c206
-rw-r--r--drivers/dma/dw/core.c34
-rw-r--r--drivers/dma/edma.c63
-rw-r--r--drivers/dma/hsu/hsu.c13
-rw-r--r--drivers/dma/hsu/hsu.h3
-rw-r--r--drivers/dma/omap-dma.c26
-rw-r--r--drivers/dma/xilinx/xilinx_vdma.c2
-rw-r--r--drivers/extcon/extcon-palmas.c3
-rw-r--r--drivers/firmware/efi/arm-init.c18
-rw-r--r--drivers/firmware/qemu_fw_cfg.c24
-rw-r--r--drivers/gpio/gpio-menz127.c9
-rw-r--r--drivers/gpio/gpio-pca953x.c3
-rw-r--r--drivers/gpio/gpio-pxa.c4
-rw-r--r--drivers/gpio/gpio-xgene.c5
-rw-r--r--drivers/gpio/gpiolib.c133
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c69
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c16
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c14
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c4
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c27
-rw-r--r--drivers/gpu/drm/drm_edid.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/Makefile6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c12
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c6
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c5
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c13
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c10
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c123
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c3
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c17
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c13
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/ni_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c8
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c6
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c13
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c79
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c79
-rw-r--r--drivers/gpu/ipu-v3/ipu-dmfc.c8
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-lenovo.c16
-rw-r--r--drivers/hid/hid-microsoft.c6
-rw-r--r--drivers/hid/hid-multitouch.c1
-rw-r--r--drivers/hid/hid-wiimote-modules.c14
-rw-r--r--drivers/hid/usbhid/hid-core.c73
-rw-r--r--drivers/hid/wacom_sys.c102
-rw-r--r--drivers/hid/wacom_wac.c11
-rw-r--r--drivers/hid/wacom_wac.h8
-rw-r--r--drivers/hwmon/max1111.c6
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c7
-rw-r--r--drivers/i2c/i2c-core.c10
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c39
-rw-r--r--drivers/ide/icside.c2
-rw-r--r--drivers/ide/palm_bk3710.c2
-rw-r--r--drivers/idle/intel_idle.c97
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c7
-rw-r--r--drivers/iio/adc/Kconfig1
-rw-r--r--drivers/iio/adc/max1363.c12
-rw-r--r--drivers/iio/gyro/bmg160_core.c9
-rw-r--r--drivers/iio/health/max30100.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig3
-rw-r--r--drivers/iio/industrialio-buffer.c1
-rw-r--r--drivers/iio/light/apds9960.c3
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c10
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h18
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c39
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c55
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h2
-rw-r--r--drivers/iommu/dma-iommu.c4
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/iommu.c3
-rw-r--r--drivers/iommu/rockchip-iommu.c8
-rw-r--r--drivers/isdn/hisax/isac.c15
-rw-r--r--drivers/lguest/interrupts_and_traps.c6
-rw-r--r--drivers/lguest/lg.h1
-rw-r--r--drivers/lguest/x86/core.c6
-rw-r--r--drivers/mailbox/mailbox-test.c16
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c4
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/mailbox/pcc.c4
-rw-r--r--drivers/md/bitmap.c21
-rw-r--r--drivers/md/dm-cache-metadata.c64
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c4
-rw-r--r--drivers/media/usb/au0828/au0828-core.c52
-rw-r--r--drivers/media/usb/au0828/au0828-input.c4
-rw-r--r--drivers/media/usb/au0828/au0828-video.c63
-rw-r--r--drivers/media/usb/au0828/au0828.h9
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c4
-rw-r--r--drivers/misc/lkdtm.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c2
-rw-r--r--drivers/mmc/card/block.c18
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c25
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c22
-rw-r--r--drivers/mmc/host/sdhci-tegra.c10
-rw-r--r--drivers/mmc/host/sdhci.c39
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mmc/host/usdhi6rol0.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c6
-rw-r--r--drivers/mtd/nand/nand_base.c10
-rw-r--r--drivers/mtd/nand/nandsim.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx.c85
-rw-r--r--drivers/net/dsa/mv88e6xxx.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h6
-rw-r--r--drivers/net/ethernet/cadence/macb.c69
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c64
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c196
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c165
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c40
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c18
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c91
-rw-r--r--drivers/net/phy/bcm7xxx.c4
-rw-r--r--drivers/net/team/team.c5
-rw-r--r--drivers/net/tun.c12
-rw-r--r--drivers/net/usb/cdc_ncm.c7
-rw-r--r--drivers/net/usb/plusb.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/nvdimm/btt.c2
-rw-r--r--drivers/nvdimm/bus.c2
-rw-r--r--drivers/nvdimm/core.c41
-rw-r--r--drivers/nvdimm/nd.h4
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c56
-rw-r--r--drivers/nvme/host/pci.c31
-rw-r--r--drivers/oprofile/oprofilefs.c4
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pcmcia/db1xxx_ss.c11
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c17
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c35
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c24
-rw-r--r--drivers/pinctrl/pinctrl-xway.c17
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c14
-rw-r--r--drivers/pinctrl/sh-pfc/core.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c17
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h21
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c3
-rw-r--r--drivers/powercap/intel_rapl.c1
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c2
-rw-r--r--drivers/remoteproc/st_remoteproc.c4
-rw-r--r--drivers/s390/block/dasd_alias.c226
-rw-r--r--drivers/s390/block/dasd_eckd.c38
-rw-r--r--drivers/s390/block/dasd_eckd.h3
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/scsi/aacraid/linit.c3
-rw-r--r--drivers/scsi/cxlflash/main.c138
-rw-r--r--drivers/scsi/cxlflash/main.h5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c33
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/sd.c49
-rw-r--r--drivers/scsi/sd.h7
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/spi/spi-imx.c16
-rw-r--r--drivers/spi/spi-omap2-mcspi.c62
-rw-r--r--drivers/spi/spi-rockchip.c16
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c16
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c20
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c10
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h6
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h10
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h4
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h2
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c23
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c28
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c8
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c12
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c6
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c22
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c30
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c16
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c44
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c2
-rw-r--r--drivers/staging/olpc_dcon/Kconfig35
-rw-r--r--drivers/staging/olpc_dcon/Makefile6
-rw-r--r--drivers/staging/olpc_dcon/TODO9
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c813
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h111
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c205
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c161
-rw-r--r--drivers/staging/rdma/hfi1/Kconfig1
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c28
-rw-r--r--drivers/target/target_core_fabric_configfs.c24
-rw-r--r--drivers/tty/tty_io.c5
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/core/config.c16
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/dwc2/gadget.c23
-rw-r--r--drivers/usb/dwc3/core.c48
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c5
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c11
-rw-r--r--drivers/usb/gadget/composite.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/function/f_midi.c17
-rw-r--r--drivers/usb/gadget/legacy/inode.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c14
-rw-r--r--drivers/usb/gadget/udc/udc-core.c6
-rw-r--r--drivers/usb/host/xhci-mem.c6
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c13
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-ring.c3
-rw-r--r--drivers/usb/host/xhci.c24
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c72
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c6
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/cypress_m8.c11
-rw-r--r--drivers/usb/serial/digi_acceleport.c19
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h8
-rw-r--r--drivers/usb/serial/mct_u232.c9
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/storage/scsiglue.c2
-rw-r--r--drivers/usb/storage/uas.c21
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c5
-rw-r--r--drivers/usb/usbip/usbip_common.c11
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c11
-rw-r--r--drivers/xen/events/events_base.c28
384 files changed, 4833 insertions, 2253 deletions
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index b5e54f2da53d..0d92d0f915e9 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+#ifdef CONFIG_X86
+static bool acpi_hwp_native_thermal_lvt_set;
+static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
+ u32 lvl,
+ void *context,
+ void **rv)
+{
+ u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
+ u32 capbuf[2];
+ struct acpi_osc_context osc_context = {
+ .uuid_str = sb_uuid_str,
+ .rev = 1,
+ .cap.length = 8,
+ .cap.pointer = capbuf,
+ };
+
+ if (acpi_hwp_native_thermal_lvt_set)
+ return AE_CTRL_TERMINATE;
+
+ capbuf[0] = 0x0000;
+ capbuf[1] = 0x1000; /* set bit 12 */
+
+ if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
+ if (osc_context.ret.pointer && osc_context.ret.length > 1) {
+ u32 *capbuf_ret = osc_context.ret.pointer;
+
+ if (capbuf_ret[1] & 0x1000) {
+ acpi_handle_info(handle,
+ "_OSC native thermal LVT Acked\n");
+ acpi_hwp_native_thermal_lvt_set = true;
+ }
+ }
+ kfree(osc_context.ret.pointer);
+ }
+
+ return AE_OK;
+}
+
+void __init acpi_early_processor_osc(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP)) {
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ acpi_hwp_native_thermal_lvt_osc,
+ NULL, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
+ acpi_hwp_native_thermal_lvt_osc,
+ NULL, NULL);
+ }
+}
+#endif
+
/*
* The following ACPI IDs are known to be suitable for representing as
* processor devices.
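The new handler advertises a single capability to firmware: capbuf[0] is the _OSC query/status DWORD and capbuf[1] carries the capability bits, with bit 12 (0x1000) requesting native OS handling of the HWP thermal LVT. Firmware grants the request by leaving that bit set in the returned capability word. A minimal sketch of that bit check, using an illustrative macro name that is not part of the kernel:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative name only: bit 12 of the _OSC capabilities DWORD. */
#define OSC_NATIVE_HWP_THERMAL_LVT	(1u << 12)	/* == 0x1000 */

static bool firmware_acked_native_lvt(const uint32_t *capbuf_ret, size_t len)
{
	/* DWORD 0 is the query/status word; DWORD 1 echoes the granted bits. */
	return len > 1 && (capbuf_ret[1] & OSC_NATIVE_HWP_THERMAL_LVT);
}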
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 0e8567846f1a..c068c829b453 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1019,6 +1019,9 @@ static int __init acpi_bus_init(void)
goto error1;
}
+ /* Set capability bits for _OSC under processor scope */
+ acpi_early_processor_osc();
+
/*
* _OSC method may exist in module level code,
* so it must be run after ACPI_FULL_INITIALIZATION
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a37508ef66c1..7c188472d9c2 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -145,6 +145,12 @@ void acpi_early_processor_set_pdc(void);
static inline void acpi_early_processor_set_pdc(void) {}
#endif
+#ifdef CONFIG_X86
+void acpi_early_processor_osc(void);
+#else
+static inline void acpi_early_processor_osc(void) {}
+#endif
+
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a1e0b9ab847a..5fb7718f256c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -246,6 +246,8 @@ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
return -EEXIST;
}
dev->power.wakeup = ws;
+ if (dev->power.wakeirq)
+ device_wakeup_attach_irq(dev, dev->power.wakeirq);
spin_unlock_irq(&dev->power.lock);
return 0;
}
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1ff1759..ec9d8610b25f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
q->backing_dev_info.name = "aoe";
- q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+ q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f7ecc287d733..51a071e32221 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
page_endio(page, rw & WRITE, err);
return err;
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c227fd4cad75..7a1cf7eaa71d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1327,8 +1327,8 @@ struct bm_extent {
#endif
#endif
-/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
- * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
+/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+ * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
* Since we may live in a mixed-platform cluster,
* we limit us to a platform agnostic constant here for now.
* A followup commit may allow even bigger BIO sizes,
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 226eb0c9f0fb..1fd1dccebb6b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
blk_queue_max_hw_sectors(q, max_hw_sectors);
/* This is the workaround for "bio would need to, but cannot, be split" */
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
- blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+ blk_queue_segment_boundary(q, PAGE_SIZE-1);
if (b) {
struct drbd_connection *connection = first_peer_device(device)->connection;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d712..80cf8add46ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
bio_segments(bio), blk_rq_bytes(cmd->rq));
+ /*
+ * This bio may be started from the middle of the 'bvec'
+ * because of bio splitting, so offset from the bvec must
+ * be passed to iov iterator
+ */
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
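The comment above is the whole story: when a bio has been split, its first bvec may already be partially consumed, and bi_iter.bi_bvec_done records how many bytes of that bvec belong to the parent bio. Propagating it into iter.iov_offset keeps the memory window aligned with the file position. A small illustration with made-up sizes (the helper below is hypothetical, not loop-driver code):

#include <stddef.h>

/* offset: where the iov_iter must start inside the first bvec
 * len:    bytes this (split) bio still owns in that bvec */
struct first_seg {
	size_t offset;
	size_t len;
};

static struct first_seg first_segment(size_t bv_len, size_t bvec_done)
{
	struct first_seg seg = { .offset = bvec_done, .len = bv_len - bvec_done };
	return seg;
}

/* Example: a 4096-byte page bvec with 512 bytes already completed by the
 * parent bio gives offset 512, len 3584. Without setting iter.iov_offset the
 * I/O would start at byte 0 of the page and corrupt the transfer. */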
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9c6234428607..94a1843b0426 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1953,7 +1953,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
- GFP_ATOMIC);
+ GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2002,7 +2002,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
rbd_dev = img_request->rbd_dev;
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
- false, GFP_ATOMIC);
+ false, GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2504,7 +2504,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
bio_chain_clone_range(&bio_list,
&bio_offset,
clone_size,
- GFP_ATOMIC);
+ GFP_NOIO);
if (!obj_request->bio_list)
goto out_unwind;
} else if (type == OBJ_REQUEST_PAGES) {
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c2e52864bb03..ce54a0160faa 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -972,7 +972,7 @@ int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
}
}
- pr_err("invalid dram address 0x%x\n", phyaddr);
+ pr_err("invalid dram address %pa\n", &phyaddr);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
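The switch from 0x%x to %pa matters because phys_addr_t can be 64 bits wide even on a 32-bit kernel (for example with LPAE), so passing it as a plain unsigned int truncates it and triggers format warnings. The kernel's %pa specifier takes a pointer to the address and prints it at its real width; a sketch of the pattern (the surrounding function is invented for illustration):

#include <linux/printk.h>
#include <linux/types.h>

static void report_bad_dram_address(phys_addr_t phyaddr)
{
	/* %pa dereferences its argument, so pass &phyaddr, not phyaddr. */
	pr_err("invalid dram address %pa\n", &phyaddr);
}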
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 834a2aeaf27a..350b7309c26d 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
- if (priv->bank[i].end > priv->bank[j].base ||
+ if (priv->bank[i].end > priv->bank[j].base &&
priv->bank[i].base < priv->bank[j].end) {
dev_err(priv->dev,
"region overlap between bank%d and bank%d\n",
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index ca9c40309757..5132c9cde50d 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
+#include <linux/of.h>
#define RNG_CTRL 0x00
#define RNG_EN (1 << 0)
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 9e9fe4b19ac4..309049d41f1b 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -57,7 +57,7 @@ static int mtk_reset(struct reset_controller_dev *rcdev,
return mtk_reset_deassert(rcdev, id);
}
-static struct reset_control_ops mtk_reset_ops = {
+static const struct reset_control_ops mtk_reset_ops = {
.assert = mtk_reset_assert,
.deassert = mtk_reset_deassert,
.reset = mtk_reset,
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
index b54da1fe73f0..b4e4d6aa2631 100644
--- a/drivers/clk/mmp/reset.c
+++ b/drivers/clk/mmp/reset.c
@@ -74,7 +74,7 @@ static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops mmp_clk_reset_ops = {
+static const struct reset_control_ops mmp_clk_reset_ops = {
.assert = mmp_clk_reset_assert,
.deassert = mmp_clk_reset_deassert,
};
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index 5428efb9fbf5..3cd1af0af0d9 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -129,20 +129,10 @@ static const char * const gcc_xo_ddr_500_200[] = {
};
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-#define P_XO 0
-#define FE_PLL_200 1
-#define FE_PLL_500 2
-#define DDRC_PLL_666 3
-
-#define DDRC_PLL_666_SDCC 1
-#define FE_PLL_125_DLY 1
-
-#define FE_PLL_WCSS2G 1
-#define FE_PLL_WCSS5G 1
static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
F(48000000, P_XO, 1, 0, 0),
- F(200000000, FE_PLL_200, 1, 0, 0),
+ F(200000000, P_FEPLL200, 1, 0, 0),
{ }
};
@@ -334,15 +324,15 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
};
static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = {
- F(1843200, FE_PLL_200, 1, 144, 15625),
- F(3686400, FE_PLL_200, 1, 288, 15625),
- F(7372800, FE_PLL_200, 1, 576, 15625),
- F(14745600, FE_PLL_200, 1, 1152, 15625),
- F(16000000, FE_PLL_200, 1, 2, 25),
+ F(1843200, P_FEPLL200, 1, 144, 15625),
+ F(3686400, P_FEPLL200, 1, 288, 15625),
+ F(7372800, P_FEPLL200, 1, 576, 15625),
+ F(14745600, P_FEPLL200, 1, 1152, 15625),
+ F(16000000, P_FEPLL200, 1, 2, 25),
F(24000000, P_XO, 1, 1, 2),
- F(32000000, FE_PLL_200, 1, 4, 25),
- F(40000000, FE_PLL_200, 1, 1, 5),
- F(46400000, FE_PLL_200, 1, 29, 125),
+ F(32000000, P_FEPLL200, 1, 4, 25),
+ F(40000000, P_FEPLL200, 1, 1, 5),
+ F(46400000, P_FEPLL200, 1, 29, 125),
F(48000000, P_XO, 1, 0, 0),
{ }
};
@@ -410,9 +400,9 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
};
static const struct freq_tbl ftbl_gcc_gp_clk[] = {
- F(1250000, FE_PLL_200, 1, 16, 0),
- F(2500000, FE_PLL_200, 1, 8, 0),
- F(5000000, FE_PLL_200, 1, 4, 0),
+ F(1250000, P_FEPLL200, 1, 16, 0),
+ F(2500000, P_FEPLL200, 1, 8, 0),
+ F(5000000, P_FEPLL200, 1, 4, 0),
{ }
};
@@ -512,11 +502,11 @@ static struct clk_branch gcc_gp3_clk = {
static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
F(144000, P_XO, 1, 3, 240),
F(400000, P_XO, 1, 1, 0),
- F(20000000, FE_PLL_500, 1, 1, 25),
- F(25000000, FE_PLL_500, 1, 1, 20),
- F(50000000, FE_PLL_500, 1, 1, 10),
- F(100000000, FE_PLL_500, 1, 1, 5),
- F(193000000, DDRC_PLL_666_SDCC, 1, 0, 0),
+ F(20000000, P_FEPLL500, 1, 1, 25),
+ F(25000000, P_FEPLL500, 1, 1, 20),
+ F(50000000, P_FEPLL500, 1, 1, 10),
+ F(100000000, P_FEPLL500, 1, 1, 5),
+ F(193000000, P_DDRPLL, 1, 0, 0),
{ }
};
@@ -536,9 +526,9 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
static const struct freq_tbl ftbl_gcc_apps_clk[] = {
F(48000000, P_XO, 1, 0, 0),
- F(200000000, FE_PLL_200, 1, 0, 0),
- F(500000000, FE_PLL_500, 1, 0, 0),
- F(626000000, DDRC_PLL_666, 1, 0, 0),
+ F(200000000, P_FEPLL200, 1, 0, 0),
+ F(500000000, P_FEPLL500, 1, 0, 0),
+ F(626000000, P_DDRPLLAPSS, 1, 0, 0),
{ }
};
@@ -557,7 +547,7 @@ static struct clk_rcg2 apps_clk_src = {
static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = {
F(48000000, P_XO, 1, 0, 0),
- F(100000000, FE_PLL_200, 2, 0, 0),
+ F(100000000, P_FEPLL200, 2, 0, 0),
{ }
};
@@ -940,7 +930,7 @@ static struct clk_branch gcc_usb2_mock_utmi_clk = {
};
static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
- F(2000000, FE_PLL_200, 10, 0, 0),
+ F(2000000, P_FEPLL200, 10, 0, 0),
{ }
};
@@ -1007,7 +997,7 @@ static struct clk_branch gcc_usb3_mock_utmi_clk = {
};
static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = {
- F(125000000, FE_PLL_125_DLY, 1, 0, 0),
+ F(125000000, P_FEPLL125DLY, 1, 0, 0),
{ }
};
@@ -1027,7 +1017,7 @@ static struct clk_rcg2 fephy_125m_dly_clk_src = {
static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = {
F(48000000, P_XO, 1, 0, 0),
- F(250000000, FE_PLL_WCSS2G, 1, 0, 0),
+ F(250000000, P_FEPLLWCSS2G, 1, 0, 0),
{ }
};
@@ -1097,7 +1087,7 @@ static struct clk_branch gcc_wcss2g_rtc_clk = {
static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = {
F(48000000, P_XO, 1, 0, 0),
- F(250000000, FE_PLL_WCSS5G, 1, 0, 0),
+ F(250000000, P_FEPLLWCSS5G, 1, 0, 0),
{ }
};
@@ -1325,6 +1315,16 @@ MODULE_DEVICE_TABLE(of, gcc_ipq4019_match_table);
static int gcc_ipq4019_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+
+ clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000);
+
return qcom_cc_probe(pdev, &gcc_ipq4019_desc);
}
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index 6c977d3a8590..0324d8daab9b 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -55,7 +55,7 @@ qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
return regmap_update_bits(rst->regmap, map->reg, mask, 0);
}
-struct reset_control_ops qcom_reset_ops = {
+const struct reset_control_ops qcom_reset_ops = {
.reset = qcom_reset,
.assert = qcom_reset_assert,
.deassert = qcom_reset_deassert,
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 0e11e2130f97..cda877927d43 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -32,6 +32,6 @@ struct qcom_reset_controller {
#define to_qcom_reset_controller(r) \
container_of(r, struct qcom_reset_controller, rcdev);
-extern struct reset_control_ops qcom_reset_ops;
+extern const struct reset_control_ops qcom_reset_ops;
#endif
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c
index 552f7bb15bc5..21218987bbc3 100644
--- a/drivers/clk/rockchip/softrst.c
+++ b/drivers/clk/rockchip/softrst.c
@@ -81,7 +81,7 @@ static int rockchip_softrst_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops rockchip_softrst_ops = {
+static const struct reset_control_ops rockchip_softrst_ops = {
.assert = rockchip_softrst_assert,
.deassert = rockchip_softrst_deassert,
};
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index 957aae63e7cc..d0c6c9a2d06a 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -1423,7 +1423,7 @@ static int atlas7_reset_module(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops atlas7_rst_ops = {
+static const struct reset_control_ops atlas7_rst_ops = {
.reset = atlas7_reset_module,
};
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index 044c1717b762..d9ea22ec4e25 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -85,7 +85,7 @@ static int sunxi_ve_of_xlate(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops sunxi_ve_reset_ops = {
+static const struct reset_control_ops sunxi_ve_reset_ops = {
.assert = sunxi_ve_reset_assert,
.deassert = sunxi_ve_reset_deassert,
};
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a9b176139aca..028dd832a39f 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -83,7 +83,7 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops sun9i_mmc_reset_ops = {
+static const struct reset_control_ops sun9i_mmc_reset_ops = {
.assert = sun9i_mmc_reset_assert,
.deassert = sun9i_mmc_reset_deassert,
};
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 5432b1c198a4..fe0c3d169377 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -76,7 +76,7 @@ static int sunxi_usb_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops sunxi_usb_reset_ops = {
+static const struct reset_control_ops sunxi_usb_reset_ops = {
.assert = sunxi_usb_reset_assert,
.deassert = sunxi_usb_reset_deassert,
};
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 2a3a4fe803d6..f60fe2e344ca 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -271,7 +271,7 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
}
}
-static struct reset_control_ops rst_ops = {
+static const struct reset_control_ops rst_ops = {
.assert = tegra_clk_rst_assert,
.deassert = tegra_clk_rst_deassert,
};
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f951f911786e..5f8dbe640a20 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -4,9 +4,6 @@
* Copyright (C) 2014 Linaro.
* Viresh Kumar <viresh.kumar@linaro.org>
*
- * The OPP code in function set_target() is reused from
- * drivers/cpufreq/omap-cpufreq.c
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4b644526fd59..8b5a415ee14a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -64,6 +64,25 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
+/**
+ * struct sample - Store performance sample
+ * @core_pct_busy: Ratio of APERF/MPERF in percent, which is actual
+ * performance during last sample period
+ * @busy_scaled: Scaled busy value which is used to calculate next
+ * P state. This can be different than core_pct_busy
+ * to account for cpu idle period
+ * @aperf: Difference of actual performance frequency clock count
+ * read from APERF MSR between last and current sample
+ * @mperf: Difference of maximum performance frequency clock count
+ * read from MPERF MSR between last and current sample
+ * @tsc: Difference of time stamp counter between last and
+ * current sample
+ * @freq: Effective frequency calculated from APERF/MPERF
+ * @time: Current time from scheduler
+ *
+ * This structure is used in the cpudata structure to store performance sample
+ * data for choosing next P State.
+ */
struct sample {
int32_t core_pct_busy;
int32_t busy_scaled;
@@ -74,6 +93,20 @@ struct sample {
u64 time;
};
+/**
+ * struct pstate_data - Store P state data
+ * @current_pstate: Current requested P state
+ * @min_pstate: Min P state possible for this platform
+ * @max_pstate: Max P state possible for this platform
+ * @max_pstate_physical: This is the physical Max P state for a processor
+ * This can be higher than the max_pstate which can
+ * be limited by platform thermal design power limits
+ * @scaling: Scaling factor to convert frequency to cpufreq
+ * frequency units
+ * @turbo_pstate: Max Turbo P state possible for this platform
+ *
+ * Stores the per cpu model P state limits and current P state.
+ */
struct pstate_data {
int current_pstate;
int min_pstate;
@@ -83,6 +116,19 @@ struct pstate_data {
int turbo_pstate;
};
+/**
+ * struct vid_data - Stores voltage information data
+ * @min: VID data for this platform corresponding to
+ * the lowest P state
+ * @max: VID data corresponding to the highest P State.
+ * @turbo: VID data for turbo P state
+ * @ratio: Ratio of (vid max - vid min) /
+ * (max P state - Min P State)
+ *
+ * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
+ * This data is used in Atom platforms, where in addition to target P state,
+ * the voltage data needs to be specified to select next P State.
+ */
struct vid_data {
int min;
int max;
@@ -90,6 +136,18 @@ struct vid_data {
int32_t ratio;
};
+/**
+ * struct _pid - Stores PID data
+ * @setpoint: Target set point for busyness or performance
+ * @integral: Storage for accumulated error values
+ * @p_gain: PID proportional gain
+ * @i_gain: PID integral gain
+ * @d_gain: PID derivative gain
+ * @deadband: PID deadband
+ * @last_err: Last error storage for integral part of PID calculation
+ *
+ * Stores PID coefficients and last error for PID controller.
+ */
struct _pid {
int setpoint;
int32_t integral;
@@ -100,6 +158,23 @@ struct _pid {
int32_t last_err;
};
+/**
+ * struct cpudata - Per CPU instance data storage
+ * @cpu: CPU number for this instance data
+ * @update_util: CPUFreq utility callback information
+ * @pstate: Stores P state limits for this CPU
+ * @vid: Stores VID limits for this CPU
+ * @pid: Stores PID parameters for this CPU
+ * @last_sample_time: Last Sample time
+ * @prev_aperf: Last APERF value read from APERF MSR
+ * @prev_mperf: Last MPERF value read from MPERF MSR
+ * @prev_tsc: Last timestamp counter (TSC) value
+ * @prev_cummulative_iowait: IO Wait time difference from last and
+ * current sample
+ * @sample: Storage for storing last Sample data
+ *
+ * This structure stores per CPU instance data for all CPUs.
+ */
struct cpudata {
int cpu;
@@ -118,6 +193,19 @@ struct cpudata {
};
static struct cpudata **all_cpu_data;
+
+/**
+ * struct pstate_adjust_policy - Stores static PID configuration data
+ * @sample_rate_ms: PID calculation sample rate in ms
+ * @sample_rate_ns: Sample rate calculation in ns
+ * @deadband: PID deadband
+ * @setpoint: PID Setpoint
+ * @p_gain_pct: PID proportional gain
+ * @i_gain_pct: PID integral gain
+ * @d_gain_pct: PID derivative gain
+ *
+ * Stores per CPU model static PID configuration data.
+ */
struct pstate_adjust_policy {
int sample_rate_ms;
s64 sample_rate_ns;
@@ -128,6 +216,20 @@ struct pstate_adjust_policy {
int i_gain_pct;
};
+/**
+ * struct pstate_funcs - Per CPU model specific callbacks
+ * @get_max: Callback to get maximum non turbo effective P state
+ * @get_max_physical: Callback to get maximum non turbo physical P state
+ * @get_min: Callback to get minimum P state
+ * @get_turbo: Callback to get turbo P state
+ * @get_scaling: Callback to get frequency scaling factor
+ * @get_val: Callback to convert P state to actual MSR write value
+ * @get_vid: Callback to get VID data for Atom platforms
+ * @get_target_pstate: Callback to a function to calculate next P state to use
+ *
+ * Core and Atom CPU models have different ways to get P State limits. This
+ * structure is used to store those callbacks.
+ */
struct pstate_funcs {
int (*get_max)(void);
int (*get_max_physical)(void);
@@ -139,6 +241,11 @@ struct pstate_funcs {
int32_t (*get_target_pstate)(struct cpudata *);
};
+/**
+ * struct cpu_defaults - Per CPU model default config data
+ * @pid_policy: PID config data
+ * @funcs: Callback function data
+ */
struct cpu_defaults {
struct pstate_adjust_policy pid_policy;
struct pstate_funcs funcs;
@@ -151,6 +258,34 @@ static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
+
+/**
+ * struct perf_limits - Store user and policy limits
+ * @no_turbo: User requested turbo state from intel_pstate sysfs
+ * @turbo_disabled: Platform turbo status either from msr
+ * MSR_IA32_MISC_ENABLE or when maximum available pstate
+ * matches the maximum turbo pstate
+ * @max_perf_pct: Effective maximum performance limit in percentage; this
+ * is the minimum of the limit enforced by the cpufreq policy
+ * and the limit set by the user via intel_pstate sysfs
+ * @min_perf_pct: Effective minimum performance limit in percentage; this
+ * is the maximum of the limit enforced by the cpufreq policy
+ * and the limit set by the user via intel_pstate sysfs
+ * @max_perf: This is a scaled value between 0 and 255 for max_perf_pct
+ * This value is used to limit max pstate
+ * @min_perf: This is a scaled value between 0 and 255 for min_perf_pct
+ * This value is used to limit min pstate
+ * @max_policy_pct: The maximum performance in percentage enforced by
+ * cpufreq setpolicy interface
+ * @max_sysfs_pct: The maximum performance in percentage enforced by
+ * intel pstate sysfs interface
+ * @min_policy_pct: The minimum performance in percentage enforced by
+ * cpufreq setpolicy interface
+ * @min_sysfs_pct: The minimum performance in percentage enforced by
+ * intel pstate sysfs interface
+ *
+ * Storage for user and policy defined limits.
+ */
struct perf_limits {
int no_turbo;
int turbo_disabled;
@@ -910,7 +1045,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
cpu->prev_tsc = tsc;
- return true;
+ /*
+ * First time this function is invoked in a given cycle, all of the
+ * previous sample data fields are equal to zero or stale and they must
+ * be populated with meaningful numbers for things to work, so assume
+ * that sample.time will always be reset before setting the utilization
+ * update hook and make the caller skip the sample then.
+ */
+ return !!cpu->last_sample_time;
}
static inline int32_t get_avg_frequency(struct cpudata *cpu)
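Returning !!cpu->last_sample_time lets the caller discard the very first sample taken after the utilization-update hook is (re)installed, since there is no previous sample to compare against. A simplified sketch of the intended caller pattern, with the rate limiting and HWP checks of the real callback omitted (this is an illustration, not the exact upstream callback body):

static void sample_and_adjust(struct cpudata *cpu, u64 time)
{
	/* Act only when intel_pstate_sample() had valid previous data. */
	if (intel_pstate_sample(cpu, time))
		intel_pstate_adjust_busy_pstate(cpu);
}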
@@ -984,8 +1126,7 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
* enough period of time to adjust our busyness.
*/
duration_ns = cpu->sample.time - cpu->last_sample_time;
- if ((s64)duration_ns > pid_params.sample_rate_ns * 3
- && cpu->last_sample_time > 0) {
+ if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
int_tofp(duration_ns));
core_busy = mul_fp(core_busy, sample_ratio);
@@ -1100,10 +1241,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_get_cpu_pstates(cpu);
intel_pstate_busy_pid_reset(cpu);
- intel_pstate_sample(cpu, 0);
cpu->update_util.func = intel_pstate_update_util;
- cpufreq_set_update_util_data(cpunum, &cpu->update_util);
pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
@@ -1122,22 +1261,54 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
return get_avg_frequency(cpu);
}
+static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
+{
+ struct cpudata *cpu = all_cpu_data[cpu_num];
+
+ /* Prevent intel_pstate_update_util() from using stale data. */
+ cpu->sample.time = 0;
+ cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
+}
+
+static void intel_pstate_clear_update_util_hook(unsigned int cpu)
+{
+ cpufreq_set_update_util_data(cpu, NULL);
+ synchronize_sched();
+}
+
+static void intel_pstate_set_performance_limits(struct perf_limits *limits)
+{
+ limits->no_turbo = 0;
+ limits->turbo_disabled = 0;
+ limits->max_perf_pct = 100;
+ limits->max_perf = int_tofp(1);
+ limits->min_perf_pct = 100;
+ limits->min_perf = int_tofp(1);
+ limits->max_policy_pct = 100;
+ limits->max_sysfs_pct = 100;
+ limits->min_policy_pct = 0;
+ limits->min_sysfs_pct = 0;
+}
+
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
- policy->max >= policy->cpuinfo.max_freq) {
- pr_debug("intel_pstate: set performance\n");
+ intel_pstate_clear_update_util_hook(policy->cpu);
+
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits = &performance_limits;
- if (hwp_active)
- intel_pstate_hwp_set(policy->cpus);
- return 0;
+ if (policy->max >= policy->cpuinfo.max_freq) {
+ pr_debug("intel_pstate: set performance\n");
+ intel_pstate_set_performance_limits(limits);
+ goto out;
+ }
+ } else {
+ pr_debug("intel_pstate: set powersave\n");
+ limits = &powersave_limits;
}
- pr_debug("intel_pstate: set powersave\n");
- limits = &powersave_limits;
limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
@@ -1163,6 +1334,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
int_tofp(100));
+ out:
+ intel_pstate_set_update_util_hook(policy->cpu);
+
if (hwp_active)
intel_pstate_hwp_set(policy->cpus);
@@ -1187,8 +1361,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
- cpufreq_set_update_util_data(cpu_num, NULL);
- synchronize_sched();
+ intel_pstate_clear_update_util_hook(cpu_num);
if (hwp_active)
return;
@@ -1455,8 +1628,7 @@ out:
get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
- cpufreq_set_update_util_data(cpu, NULL);
- synchronize_sched();
+ intel_pstate_clear_update_util_hook(cpu);
kfree(all_cpu_data[cpu]);
}
}
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5ad0ec1f0e29..97199b3c25a2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_dma_slave *dws = dwc->chan.private;
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
if (dwc->initialized == true)
return;
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
- } else {
- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
- }
+ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma_slave *dws = param;
- if (!dws || dws->dma_dev != chan->device->dev)
+ if (dws->dma_dev != chan->device->dev)
return false;
/* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
+ /*
+ * We need controller-specific data to set up slave transfers.
+ */
+ if (chan->private && !dw_dma_filter(chan, chan->private)) {
+ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+ return -EINVAL;
+ }
+
/* Enable controller here if needed */
if (!dw->in_use)
dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+
+ /* Clear custom channel configuration */
+ dwc->src_id = 0;
+ dwc->dst_id = 0;
+
+ dwc->src_master = 0;
+ dwc->dst_master = 0;
+
dwc->initialized = false;
/* Disable interrupts */
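With this rework the controller-specific dw_dma_slave data is applied in one place: dw_dma_filter() copies it into the channel and rejects requests aimed at a different controller, and dwc_alloc_chan_resources() now runs the same check for data handed over through chan->private. A hedged sketch of the filter-based request path (the request-line IDs are illustrative, and the header that declares dw_dma_filter() is assumed here):

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>

static struct dma_chan *request_dw_tx_chan(struct device *dma_dev)
{
	/* Illustrative request-line IDs; real values come from platform data. */
	struct dw_dma_slave dws = {
		.dma_dev = dma_dev,	/* must match the DMA controller's device */
		.src_id = 0,
		.dst_id = 1,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* dw_dma_filter() copies dws, so a stack variable is fine here. */
	return dma_request_channel(mask, dw_dma_filter, &dws);
}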
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ee3463e774f8..04070baab78a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct edma_desc *edesc;
dma_addr_t src_addr, dst_addr;
enum dma_slave_buswidth dev_width;
+ bool use_intermediate = false;
u32 burst;
int i, ret, nslots;
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
* but the synchronization is difficult to achieve with Cyclic and
* cannot be guaranteed, so we error out early.
*/
- if (nslots > MAX_NR_SG)
- return NULL;
+ if (nslots > MAX_NR_SG) {
+ /*
+ * If the burst and period sizes are the same, we can put
+ * the full buffer into a single period and activate
+ * intermediate interrupts. This will produce interrupts
+ * after each burst, which is also after each desired period.
+ */
+ if (burst == period_len) {
+ period_len = buf_len;
+ nslots = 2;
+ use_intermediate = true;
+ } else {
+ return NULL;
+ }
+ }
edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
GFP_ATOMIC);
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
/*
* Enable period interrupt only if it is requested
*/
- if (tx_flags & DMA_PREP_INTERRUPT)
+ if (tx_flags & DMA_PREP_INTERRUPT) {
edesc->pset[i].param.opt |= TCINTEN;
+
+ /* Also enable intermediate interrupts if necessary */
+ if (use_intermediate)
+ edesc->pset[i].param.opt |= ITCINTEN;
+ }
}
/* Place the cyclic channel to highest priority queue */
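The reasoning in the hunk above is easiest to see with numbers. When one burst equals one requested period, collapsing the whole buffer into a single period loses nothing: the intermediate-transfer interrupt (ITCINTEN) then fires after every burst, which is exactly once per originally requested period. A worked example with made-up sizes:

/*
 * Illustrative cyclic request:
 *   buf_len    = 65536 bytes
 *   period_len =  1024 bytes
 *   burst      =  1024 bytes  (equal to period_len)
 *
 * One paRAM slot per period would need 64 slots, well above MAX_NR_SG, so
 * the old code refused the transfer. With the fallback, period_len is bumped
 * to buf_len (one 65536-byte period, nslots = 2) and ITCINTEN raises an
 * interrupt after each 1024-byte burst, i.e. every 1024 bytes, which is the
 * same cadence the caller asked for with TCINTEN per period.
 */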
@@ -1570,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
- struct platform_device *tc_pdev;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF) || !tc)
- return;
-
- tc_pdev = of_find_device_by_node(tc->node);
- if (!tc_pdev) {
- pr_err("%s: TPTC device is not found\n", __func__);
- return;
- }
- if (!pm_runtime_enabled(&tc_pdev->dev))
- pm_runtime_enable(&tc_pdev->dev);
-
- if (enable)
- ret = pm_runtime_get_sync(&tc_pdev->dev);
- else
- ret = pm_runtime_put_sync(&tc_pdev->dev);
-
- if (ret < 0)
- pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
- enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
@@ -1632,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
echan->hw_triggered ? "HW" : "SW");
- edma_tc_set_pm_state(echan->tc, true);
-
return 0;
err_slot:
@@ -1670,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
echan->alloced = false;
}
- edma_tc_set_pm_state(echan->tc, false);
echan->tc = NULL;
echan->hw_triggered = false;
@@ -2417,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
int i;
for (i = 0; i < ecc->num_channels; i++) {
- if (echan[i].alloced) {
+ if (echan[i].alloced)
edma_setup_interrupt(&echan[i], false);
- edma_tc_set_pm_state(echan[i].tc, false);
- }
}
return 0;
@@ -2450,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
/* Set up channel -> slot mapping for the entry slot */
edma_set_chmap(&echan[i], echan[i].slot[0]);
-
- edma_tc_set_pm_state(echan[i].tc, true);
}
}
@@ -2475,7 +2461,8 @@ static struct platform_driver edma_driver = {
static int edma_tptc_probe(struct platform_device *pdev)
{
- return 0;
+ pm_runtime_enable(&pdev->dev);
+ return pm_runtime_get_sync(&pdev->dev);
}
static struct platform_driver edma_tptc_driver = {
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index eef145edb936..ee510515ce18 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
if (hsuc->direction == DMA_MEM_TO_DEV) {
bsr = config->dst_maxburst;
- mtsr = config->dst_addr_width;
+ mtsr = config->src_addr_width;
} else if (hsuc->direction == DMA_DEV_TO_MEM) {
bsr = config->src_maxburst;
- mtsr = config->src_addr_width;
+ mtsr = config->dst_addr_width;
}
hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
sr = hsu_chan_readl(hsuc, HSU_CH_SR);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
- return sr;
+ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
struct hsu_dma_desc *desc = hsuc->desc;
- size_t bytes = desc->length;
+ size_t bytes = 0;
int i;
- i = desc->active % HSU_DMA_CHAN_NR_DESC;
+ for (i = desc->active; i < desc->nents; i++)
+ bytes += desc->sg[i].len;
+
+ i = HSU_DMA_CHAN_NR_DESC - 1;
do {
bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
} while (--i >= 0);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 578a8ee8cd05..6b070c22b1df 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_CHE BIT(15)
+#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 43bd5aee7ffe..1e984e18c126 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -48,6 +48,7 @@ struct omap_chan {
unsigned dma_sig;
bool cyclic;
bool paused;
+ bool running;
int dma_ch;
struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
/* Enable channel */
omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+ c->running = true;
}
static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
omap_dma_chan_write(c, CLNK_CTRL, val);
}
+
+ c->running = false;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
struct omap_chan *c = to_omap_dma_chan(chan);
struct virt_dma_desc *vd;
enum dma_status ret;
- uint32_t ccr;
unsigned long flags;
- ccr = omap_dma_chan_read(c, CCR);
- /* The channel is no longer active, handle the completion right away */
- if (!(ccr & CCR_ENABLE))
- omap_dma_callback(c->dma_ch, 0, c);
-
ret = dma_cookie_status(chan, cookie, txstate);
+
+ if (!c->paused && c->running) {
+ uint32_t ccr = omap_dma_chan_read(c, CCR);
+ /*
+ * The channel is no longer active, set the return value
+ * accordingly
+ */
+ if (!(ccr & CCR_ENABLE))
+ ret = DMA_COMPLETE;
+ }
+
if (ret == DMA_COMPLETE || !txstate)
return ret;
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
- d->cicr = CICR_DROP_IE;
- if (tx_flags & DMA_PREP_INTERRUPT)
- d->cicr |= CICR_FRAME_IE;
+ d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
d->csdp = data_type;
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 0ee0321868d3..ef67f278e076 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1236,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
int chan_id = dma_spec->args[0];
- if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+ if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
return NULL;
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 841a4b586395..8b3226dca1d9 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -348,8 +348,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_vbus_irq_handler,
IRQF_TRIGGER_FALLING |
IRQF_TRIGGER_RISING |
- IRQF_ONESHOT |
- IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas_usb_vbus",
palmas_usb);
if (status < 0) {
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index aa1f743152a2..8714f8c271ba 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -203,7 +203,19 @@ void __init efi_init(void)
reserve_regions();
early_memunmap(memmap.map, params.mmap_size);
- memblock_mark_nomap(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
+
+ if (IS_ENABLED(CONFIG_ARM)) {
+ /*
+ * ARM currently does not allow ioremap_cache() to be called on
+ * memory regions that are covered by struct page. So remove the
+ * UEFI memory map from the linear mapping.
+ */
+ memblock_mark_nomap(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+ } else {
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+ }
}
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index fedbff55a7f3..815c4a5cae54 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -77,12 +77,28 @@ static inline u16 fw_cfg_sel_endianness(u16 key)
static inline void fw_cfg_read_blob(u16 key,
void *buf, loff_t pos, size_t count)
{
+ u32 glk;
+ acpi_status status;
+
+ /* If we have ACPI, ensure mutual exclusion against any potential
+ * device access by the firmware, e.g. via AML methods:
+ */
+ status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+ if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+ /* Should never get here */
+ WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
+ memset(buf, 0, count);
+ return;
+ }
+
mutex_lock(&fw_cfg_dev_lock);
iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl);
while (pos-- > 0)
ioread8(fw_cfg_reg_data);
ioread8_rep(fw_cfg_reg_data, buf, count);
mutex_unlock(&fw_cfg_dev_lock);
+
+ acpi_release_global_lock(glk);
}
/* clean up fw_cfg device i/o */
@@ -727,12 +743,18 @@ device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
static int __init fw_cfg_sysfs_init(void)
{
+ int ret;
+
/* create /sys/firmware/qemu_fw_cfg/ top level directory */
fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
if (!fw_cfg_top_ko)
return -ENOMEM;
- return platform_driver_register(&fw_cfg_sysfs_driver);
+ ret = platform_driver_register(&fw_cfg_sysfs_driver);
+ if (ret)
+ fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+
+ return ret;
}
static void __exit fw_cfg_sysfs_exit(void)
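The qemu_fw_cfg.c hunks above serialize register access behind the ACPI global lock and, in fw_cfg_sysfs_init(), undo the top-level kobject creation when platform_driver_register() fails instead of leaking it. A small userspace sketch of that unwind-on-failure shape; the helpers are hypothetical stand-ins for the kobject/driver calls, not the driver's real API:

	#include <stdio.h>

	static int create_top_object(void)   { puts("top object created"); return 0; }
	static void destroy_top_object(void) { puts("top object destroyed"); }
	static int register_driver(void)     { return -1; /* pretend this fails */ }

	static int example_init(void)
	{
		int ret;

		if (create_top_object())
			return -1;

		ret = register_driver();
		if (ret)
			destroy_top_object();	/* roll back step one so nothing leaks */

		return ret;
	}

	int main(void)
	{
		printf("init returned %d\n", example_init());
		return 0;
	}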
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a68e199d579d..c5c9599a3a71 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -37,7 +37,6 @@ struct men_z127_gpio {
void __iomem *reg_base;
struct mcb_device *mdev;
struct resource *mem;
- spinlock_t lock;
};
static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
@@ -69,7 +68,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
debounce /= 50;
}
- spin_lock(&priv->lock);
+ spin_lock(&gc->bgpio_lock);
db_en = readl(priv->reg_base + MEN_Z127_DBER);
@@ -84,7 +83,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
writel(db_en, priv->reg_base + MEN_Z127_DBER);
writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
- spin_unlock(&priv->lock);
+ spin_unlock(&gc->bgpio_lock);
return 0;
}
@@ -97,7 +96,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
if (gpio_pin >= gc->ngpio)
return -EINVAL;
- spin_lock(&priv->lock);
+ spin_lock(&gc->bgpio_lock);
od_en = readl(priv->reg_base + MEN_Z127_ODER);
if (gpiochip_line_is_open_drain(gc, gpio_pin))
@@ -106,7 +105,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
od_en &= ~BIT(gpio_pin);
writel(od_en, priv->reg_base + MEN_Z127_ODER);
- spin_unlock(&priv->lock);
+ spin_unlock(&gc->bgpio_lock);
return 0;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d0d3065a7557..e66084c295fb 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -18,6 +18,7 @@
#include <linux/i2c.h>
#include <linux/platform_data/pca953x.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <linux/of_platform.h>
#include <linux/acpi.h>
@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
switch (chip->chip_type) {
case PCA953X_TYPE:
ret = i2c_smbus_write_word_data(chip->client,
- reg << 1, (u16) *val);
+ reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
break;
case PCA957X_TYPE:
ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index b2b7b78664b8..76ac906b4d78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
ret = pinctrl_gpio_direction_output(chip->base + offset);
- if (!ret)
- return 0;
+ if (ret)
+ return ret;
spin_lock_irqsave(&gpio_lock, flags);
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index c0aa387664bf..0dc916191689 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -173,6 +173,11 @@ static int xgene_gpio_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err;
+ }
+
gpio->base = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
if (!gpio->base) {
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 72065532c1c7..b747c76fd2b1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -68,6 +68,7 @@ LIST_HEAD(gpio_devices);
static void gpiochip_free_hogs(struct gpio_chip *chip);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+static bool gpiolib_initialized;
static inline void desc_set_label(struct gpio_desc *d, const char *label)
{
@@ -440,9 +441,63 @@ static void gpiodevice_release(struct device *dev)
cdev_del(&gdev->chrdev);
list_del(&gdev->list);
ida_simple_remove(&gpio_ida, gdev->id);
+ kfree(gdev->label);
+ kfree(gdev->descs);
kfree(gdev);
}
+static int gpiochip_setup_dev(struct gpio_device *gdev)
+{
+ int status;
+
+ cdev_init(&gdev->chrdev, &gpio_fileops);
+ gdev->chrdev.owner = THIS_MODULE;
+ gdev->chrdev.kobj.parent = &gdev->dev.kobj;
+ gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
+ status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
+ if (status < 0)
+ chip_warn(gdev->chip, "failed to add char device %d:%d\n",
+ MAJOR(gpio_devt), gdev->id);
+ else
+ chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
+ MAJOR(gpio_devt), gdev->id);
+ status = device_add(&gdev->dev);
+ if (status)
+ goto err_remove_chardev;
+
+ status = gpiochip_sysfs_register(gdev);
+ if (status)
+ goto err_remove_device;
+
+ /* From this point, the .release() function cleans up gpio_device */
+ gdev->dev.release = gpiodevice_release;
+ get_device(&gdev->dev);
+ pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
+ __func__, gdev->base, gdev->base + gdev->ngpio - 1,
+ dev_name(&gdev->dev), gdev->chip->label ? : "generic");
+
+ return 0;
+
+err_remove_device:
+ device_del(&gdev->dev);
+err_remove_chardev:
+ cdev_del(&gdev->chrdev);
+ return status;
+}
+
+static void gpiochip_setup_devs(void)
+{
+ struct gpio_device *gdev;
+ int err;
+
+ list_for_each_entry(gdev, &gpio_devices, list) {
+ err = gpiochip_setup_dev(gdev);
+ if (err)
+ pr_err("%s: Failed to initialize gpio device (%d)\n",
+ dev_name(&gdev->dev), err);
+ }
+}
+
/**
* gpiochip_add_data() - register a gpio_chip
* @chip: the chip to register, with chip->base initialized
@@ -457,6 +512,9 @@ static void gpiodevice_release(struct device *dev)
* the gpio framework's arch_initcall(). Otherwise sysfs initialization
* for GPIOs will fail rudely.
*
+ * gpiochip_add_data() must only be called after gpiolib initialization,
+ * ie after core_initcall().
+ *
* If chip->base is negative, this requests dynamic assignment of
* a range of valid GPIOs.
*/
@@ -504,8 +562,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
else
gdev->owner = THIS_MODULE;
- gdev->descs = devm_kcalloc(&gdev->dev, chip->ngpio,
- sizeof(gdev->descs[0]), GFP_KERNEL);
+ gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
status = -ENOMEM;
goto err_free_gdev;
@@ -514,16 +571,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
if (chip->ngpio == 0) {
chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
status = -EINVAL;
- goto err_free_gdev;
+ goto err_free_descs;
}
if (chip->label)
- gdev->label = devm_kstrdup(&gdev->dev, chip->label, GFP_KERNEL);
+ gdev->label = kstrdup(chip->label, GFP_KERNEL);
else
- gdev->label = devm_kstrdup(&gdev->dev, "unknown", GFP_KERNEL);
+ gdev->label = kstrdup("unknown", GFP_KERNEL);
if (!gdev->label) {
status = -ENOMEM;
- goto err_free_gdev;
+ goto err_free_descs;
}
gdev->ngpio = chip->ngpio;
@@ -543,7 +600,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
if (base < 0) {
status = base;
spin_unlock_irqrestore(&gpio_lock, flags);
- goto err_free_gdev;
+ goto err_free_label;
}
/*
* TODO: it should not be necessary to reflect the assigned
@@ -558,7 +615,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
status = gpiodev_add_to_list(gdev);
if (status) {
spin_unlock_irqrestore(&gpio_lock, flags);
- goto err_free_gdev;
+ goto err_free_label;
}
for (i = 0; i < chip->ngpio; i++) {
@@ -596,39 +653,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
* we get a device node entry in sysfs under
* /sys/bus/gpio/devices/gpiochipN/dev that can be used for
* coldplug of device nodes and other udev business.
+ * We can do this only if gpiolib has been initialized.
+ * Otherwise, defer until later.
*/
- cdev_init(&gdev->chrdev, &gpio_fileops);
- gdev->chrdev.owner = THIS_MODULE;
- gdev->chrdev.kobj.parent = &gdev->dev.kobj;
- gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
- status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
- if (status < 0)
- chip_warn(chip, "failed to add char device %d:%d\n",
- MAJOR(gpio_devt), gdev->id);
- else
- chip_dbg(chip, "added GPIO chardev (%d:%d)\n",
- MAJOR(gpio_devt), gdev->id);
- status = device_add(&gdev->dev);
- if (status)
- goto err_remove_chardev;
-
- status = gpiochip_sysfs_register(gdev);
- if (status)
- goto err_remove_device;
-
- /* From this point, the .release() function cleans up gpio_device */
- gdev->dev.release = gpiodevice_release;
- get_device(&gdev->dev);
- pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
- __func__, gdev->base, gdev->base + gdev->ngpio - 1,
- dev_name(&gdev->dev), chip->label ? : "generic");
-
+ if (gpiolib_initialized) {
+ status = gpiochip_setup_dev(gdev);
+ if (status)
+ goto err_remove_chip;
+ }
return 0;
-err_remove_device:
- device_del(&gdev->dev);
-err_remove_chardev:
- cdev_del(&gdev->chrdev);
err_remove_chip:
acpi_gpiochip_remove(chip);
gpiochip_free_hogs(chip);
@@ -637,6 +671,10 @@ err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
list_del(&gdev->list);
spin_unlock_irqrestore(&gpio_lock, flags);
+err_free_label:
+ kfree(gdev->label);
+err_free_descs:
+ kfree(gdev->descs);
err_free_gdev:
ida_simple_remove(&gpio_ida, gdev->id);
/* failures here can mean systems won't boot... */
@@ -2231,9 +2269,11 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
return desc;
}
-static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
+static struct gpio_desc *acpi_find_gpio(struct device *dev,
+ const char *con_id,
unsigned int idx,
- enum gpio_lookup_flags *flags)
+ enum gpiod_flags flags,
+ enum gpio_lookup_flags *lookupflags)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_gpio_info info;
@@ -2264,10 +2304,16 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
if (IS_ERR(desc))
return desc;
+
+ if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
+ info.gpioint) {
+ dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+ return ERR_PTR(-ENOENT);
+ }
}
if (info.polarity == GPIO_ACTIVE_LOW)
- *flags |= GPIO_ACTIVE_LOW;
+ *lookupflags |= GPIO_ACTIVE_LOW;
return desc;
}
@@ -2530,7 +2576,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
desc = of_find_gpio(dev, con_id, idx, &lookupflags);
} else if (ACPI_COMPANION(dev)) {
dev_dbg(dev, "using ACPI for GPIO lookup\n");
- desc = acpi_find_gpio(dev, con_id, idx, &lookupflags);
+ desc = acpi_find_gpio(dev, con_id, idx, flags, &lookupflags);
}
}
@@ -2829,6 +2875,9 @@ static int __init gpiolib_dev_init(void)
if (ret < 0) {
pr_err("gpiolib: failed to allocate char dev region\n");
bus_unregister(&gpio_bus_type);
+ } else {
+ gpiolib_initialized = true;
+ gpiochip_setup_devs();
}
return ret;
}
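The gpiolib rework above splits chardev/sysfs setup out into gpiochip_setup_dev() and defers it for chips registered before the core initcall has run; once gpiolib_initialized is set, gpiochip_setup_devs() walks the device list and catches up. A compact userspace sketch of that register-now-or-defer pattern (names and types are illustrative only):

	#include <stdio.h>
	#include <stdbool.h>

	#define MAX_CHIPS 8

	struct chip { const char *label; };

	static struct chip *chips[MAX_CHIPS];
	static int nchips;
	static bool core_initialized;

	static void setup_dev(struct chip *c)
	{
		printf("exposing %s to userspace\n", c->label);
	}

	static void register_chip(struct chip *c)
	{
		chips[nchips++] = c;		/* always track the chip */
		if (core_initialized)
			setup_dev(c);		/* core is up: expose it right away */
		/* otherwise leave it queued; core init finishes the job */
	}

	static void core_init(void)
	{
		core_initialized = true;
		for (int i = 0; i < nchips; i++)
			setup_dev(chips[i]);	/* catch up on early registrations */
	}

	int main(void)
	{
		struct chip a = { "gpiochip0" }, b = { "gpiochip1" };

		register_chip(&a);	/* too early: only queued */
		core_init();		/* now gpiochip0 gets its device */
		register_chip(&b);	/* late registration is exposed immediately */
		return 0;
	}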
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 0f734ee05274..ca77ec10147c 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,10 +1,14 @@
-menu "ACP Configuration"
+menu "ACP (Audio CoProcessor) Configuration"
config DRM_AMD_ACP
- bool "Enable ACP IP support"
+ bool "Enable AMD Audio CoProcessor IP support"
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help
Choose this option to enable ACP IP support for AMD SOCs.
+ This adds the ACP (Audio CoProcessor) IP driver and wires
+ it up into the amdgpu driver. The ACP block provides the DMA
+ engine for the i2s-based ALSA driver. It is required for audio
+ on APUs which utilize an i2s codec.
endmenu
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c4a21c6428f5..b77489dec6e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
+ void *saved_bo;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@@ -2033,6 +2034,7 @@ struct amdgpu_device {
/* tracking pinned memory */
u64 vram_pin_size;
+ u64 invisible_pin_size;
u64 gart_pin_size;
/* amdkfd interface */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a4b101e10c6..6043dc7c3a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
uint32_t line_time_us, vblank_lines;
+ struct cgs_mode_info *mode_info;
if (info == NULL)
return -EINVAL;
+ mode_info = info->mode_info;
+
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
info->display_count++;
}
- if (info->mode_info != NULL &&
+ if (mode_info != NULL &&
crtc->enabled && amdgpu_crtc->enabled &&
amdgpu_crtc->hw_mode.clock) {
line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
(amdgpu_crtc->v_border * 2);
- info->mode_info->vblank_time_us = vblank_lines * line_time_us;
- info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- info->mode_info->ref_clock = adev->clock.spll.reference_freq;
- info->mode_info++;
+ mode_info->vblank_time_us = vblank_lines * line_time_us;
+ mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ mode_info = NULL;
}
}
}
@@ -847,6 +850,16 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
return 0;
}
+
+static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
+{
+ CGS_FUNC_ADEV;
+
+ adev->pm.dpm_enabled = enabled;
+
+ return 0;
+}
+
/** \brief evaluate acpi namespace object, handle or pathname must be valid
* \param cgs_device
* \param info input/output arguments for the control method
@@ -1097,6 +1110,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state,
amdgpu_cgs_get_active_displays_info,
+ amdgpu_cgs_notify_dpm_enabled,
amdgpu_cgs_call_acpi_method,
amdgpu_cgs_query_system_info,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f0ed974bd4e0..3fb405b3a614 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -57,7 +57,7 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
return true;
- fence_put(*f);
+ fence_put(fence);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 4303b447efe8..d81f1f4883a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -121,7 +121,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
- struct fence **ptr;
+ struct fence *old, **ptr;
uint32_t seq;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -141,7 +141,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
*/
- BUG_ON(rcu_dereference_protected(*ptr, 1));
+ old = rcu_dereference_protected(*ptr, 1);
+ if (old && !fence_is_signaled(old)) {
+ DRM_INFO("rcu slot is busy\n");
+ fence_wait(old, false);
+ }
rcu_assign_pointer(*ptr, fence_get(&fence->base));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index f594cfaa97e5..762cfdb85147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -219,6 +219,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r) {
return r;
}
+ adev->ddev->vblank_disable_allowed = true;
+
/* enable msi */
adev->irq.msi_enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7805a8706af7..aef70db16832 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -382,8 +382,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_info_vram_gtt vram_gtt;
vram_gtt.vram_size = adev->mc.real_vram_size;
+ vram_gtt.vram_size -= adev->vram_pin_size;
vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
- vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
+ vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
vram_gtt.gtt_size = adev->mc.gtt_size;
vram_gtt.gtt_size -= adev->gart_pin_size;
return copy_to_user(out, &vram_gtt,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 151a2d42c639..e557fc1f17c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -424,9 +424,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
- if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
bo->adev->vram_pin_size += amdgpu_bo_size(bo);
- else
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ } else
bo->adev->gart_pin_size += amdgpu_bo_size(bo);
} else {
dev_err(bo->adev->dev, "%p pin failed\n", bo);
@@ -456,9 +458,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
- else
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ } else
bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
} else {
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
@@ -476,6 +480,17 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}
+static const char *amdgpu_vram_names[] = {
+ "UNKNOWN",
+ "GDDR1",
+ "DDR2",
+ "GDDR3",
+ "GDDR4",
+ "GDDR5",
+ "HBM",
+ "DDR3"
+};
+
int amdgpu_bo_init(struct amdgpu_device *adev)
{
/* Add an MTRR for the VRAM */
@@ -484,8 +499,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
adev->mc.mc_vram_size >> 20,
(unsigned long long)adev->mc.aper_size >> 20);
- DRM_INFO("RAM width %dbits DDR\n",
- adev->mc.vram_width);
+ DRM_INFO("RAM width %dbits %s\n",
+ adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
return amdgpu_ttm_init(adev);
}
@@ -608,6 +623,10 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if ((offset + size) <= adev->mc.visible_vram_size)
return 0;
+ /* Can't move a pinned BO to visible VRAM */
+ if (abo->pin_count > 0)
+ return -EINVAL;
+
/* hurrah the memory is not visible ! */
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
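The amdgpu_object.c hunks above start tracking CPU-invisible pinned VRAM separately, which is what lets the amdgpu_kms.c query earlier in this diff report sensible remaining sizes: total VRAM minus everything pinned, and CPU-visible VRAM minus only the visible share of the pins. A tiny sketch of that bookkeeping (plain C, illustrative field names):

	#include <stdio.h>

	struct vram_stats {
		unsigned long long real_size;
		unsigned long long visible_size;
		unsigned long long pin_size;		/* all pinned VRAM */
		unsigned long long invisible_pin_size;	/* pinned VRAM the CPU cannot map */
	};

	static void pin(struct vram_stats *s, unsigned long long bytes, int cpu_visible)
	{
		s->pin_size += bytes;
		if (!cpu_visible)
			s->invisible_pin_size += bytes;
	}

	int main(void)
	{
		struct vram_stats s = { .real_size = 4096, .visible_size = 256 };

		pin(&s, 512, 0);	/* buffer outside the CPU-visible window */
		pin(&s, 64, 1);		/* scan-out buffer the CPU can reach */

		printf("free VRAM: %llu\n", s.real_size - s.pin_size);
		printf("free CPU-visible VRAM: %llu\n",
		       s.visible_size - (s.pin_size - s.invisible_pin_size));
		return 0;
	}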
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 3cb6d6c413c7..e9c6ae6ed2f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,7 +143,7 @@ static int amdgpu_pp_late_init(void *handle)
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
+ if (adev->pp_enabled && adev->pm.dpm_enabled) {
amdgpu_pm_sysfs_init(adev);
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
}
@@ -161,12 +161,8 @@ static int amdgpu_pp_sw_init(void *handle)
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
- if (amdgpu_dpm == 0)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- }
+ if (adev->pp_enabled)
+ adev->pm.dpm_enabled = true;
#endif
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ab34190859a8..6f3369de232f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -384,9 +384,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev;
+ struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
+ /* Can't move a pinned BO */
+ abo = container_of(bo, struct amdgpu_bo, tbo);
+ if (WARN_ON_ONCE(abo->pin_count > 0))
+ return -EINVAL;
+
adev = amdgpu_get_adev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
amdgpu_move_null(bo, new_mem);
@@ -616,7 +622,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index c1a581044417..338da80006b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -241,32 +241,28 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = &adev->uvd.ring;
- int i, r;
+ unsigned size;
+ void *ptr;
+ int i;
if (adev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
- uint32_t handle = atomic_read(&adev->uvd.handles[i]);
- if (handle != 0) {
- struct fence *fence;
+ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+ if (atomic_read(&adev->uvd.handles[i]))
+ break;
- amdgpu_uvd_note_usage(adev);
+ if (i == AMDGPU_MAX_UVD_HANDLES)
+ return 0;
- r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);
- if (r) {
- DRM_ERROR("Error destroying UVD (%d)!\n", r);
- continue;
- }
+ size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+ ptr = adev->uvd.cpu_addr;
- fence_wait(fence, false);
- fence_put(fence);
+ adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+ if (!adev->uvd.saved_bo)
+ return -ENOMEM;
- adev->uvd.filp[i] = NULL;
- atomic_set(&adev->uvd.handles[i], 0);
- }
- }
+ memcpy(adev->uvd.saved_bo, ptr, size);
return 0;
}
@@ -275,23 +271,29 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
- const struct common_firmware_header *hdr;
- unsigned offset;
if (adev->uvd.vcpu_bo == NULL)
return -EINVAL;
- hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
- (adev->uvd.fw->size) - offset);
-
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
- size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr = adev->uvd.cpu_addr;
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
- memset(ptr, 0, size);
+ if (adev->uvd.saved_bo != NULL) {
+ memcpy(ptr, adev->uvd.saved_bo, size);
+ kfree(adev->uvd.saved_bo);
+ adev->uvd.saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned offset;
+
+ hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+ (adev->uvd.fw->size) - offset);
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ memset(ptr, 0, size);
+ }
return 0;
}
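With the amdgpu_uvd.c change above, suspend no longer tears down open sessions; it snapshots the VCPU buffer into uvd.saved_bo, and resume either copies that snapshot back or, when no snapshot exists, reloads the firmware image and clears the remainder. A small userspace analogue of the save/restore half (hypothetical names, malloc standing in for the kernel allocation):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define VCPU_SIZE 64

	static unsigned char vcpu[VCPU_SIZE];	/* live working buffer */
	static unsigned char *saved;		/* snapshot taken at suspend */

	static int suspend(void)
	{
		saved = malloc(VCPU_SIZE);
		if (!saved)
			return -1;
		memcpy(saved, vcpu, VCPU_SIZE);	/* preserve the running state */
		return 0;
	}

	static void resume(void)
	{
		if (saved) {
			memcpy(vcpu, saved, VCPU_SIZE);	/* restore the snapshot */
			free(saved);
			saved = NULL;
		} else {
			memset(vcpu, 0, VCPU_SIZE);	/* cold start: reinitialize */
		}
	}

	int main(void)
	{
		vcpu[0] = 0x42;
		if (suspend())
			return 1;
		vcpu[0] = 0;		/* pretend the power cycle clobbered it */
		resume();
		printf("restored byte: 0x%02x\n", vcpu[0]);
		return 0;
	}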
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 82ce7d943884..05b0353d3880 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -903,14 +903,6 @@ static int gmc_v7_0_early_init(void *handle)
gmc_v7_0_set_gart_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -927,6 +919,14 @@ static int gmc_v7_0_sw_init(void *handle)
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 29bd7b57dc91..02deb3229405 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -863,14 +863,6 @@ static int gmc_v8_0_early_init(void *handle)
gmc_v8_0_set_gart_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -881,12 +873,27 @@ static int gmc_v8_0_late_init(void *handle)
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
+#define mmMC_SEQ_MISC0_FIJI 0xA71
+
static int gmc_v8_0_sw_init(void *handle)
{
int r;
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp;
+
+ if (adev->asic_type == CHIP_FIJI)
+ tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+ else
+ tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b6f7d7bff929..0f14199cf716 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -307,7 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
amdgpu_irq_fini(adev);
amdgpu_ih_ring_fini(adev);
- amdgpu_irq_add_domain(adev);
+ amdgpu_irq_remove_domain(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index c606ccb38d8b..cb463753115b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_suspend(adev);
+ r = uvd_v4_2_hw_fini(adev);
if (r)
return r;
- r = uvd_v4_2_hw_fini(adev);
+ r = amdgpu_uvd_suspend(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e3c852d9d79a..16476d80f475 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_suspend(adev);
+ r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
- r = uvd_v5_0_hw_fini(adev);
+ r = amdgpu_uvd_suspend(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 3375e614ac67..d49379145ef2 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,15 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ r = uvd_v6_0_hw_fini(adev);
+ if (r)
+ return r;
+
/* Skip this for APU for now */
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
}
- r = uvd_v6_0_hw_fini(adev);
- if (r)
- return r;
return r;
}
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index aec38fc3834f..ab84d4947247 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -589,6 +589,8 @@ typedef int(*cgs_get_active_displays_info)(
void *cgs_device,
struct cgs_display_info *info);
+typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
+
typedef int (*cgs_call_acpi_method)(void *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
@@ -644,6 +646,8 @@ struct cgs_ops {
cgs_set_clockgating_state set_clockgating_state;
/* display manager */
cgs_get_active_displays_info get_active_displays_info;
+ /* notify dpm enabled */
+ cgs_notify_dpm_enabled notify_dpm_enabled;
/* ACPI */
cgs_call_acpi_method call_acpi_method;
/* get system info */
@@ -734,8 +738,12 @@ struct cgs_device
CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \
CGS_CALL(set_clockgating_state, dev, block_type, state)
+#define cgs_notify_dpm_enabled(dev, enabled) \
+ CGS_CALL(notify_dpm_enabled, dev, enabled)
+
#define cgs_get_active_displays_info(dev, info) \
CGS_CALL(get_active_displays_info, dev, info)
+
#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
#define cgs_query_system_info(dev, sys_info) \
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 6b52c78cb404..56856a2864d1 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -137,14 +137,14 @@ static const pem_event_action *resume_event[] = {
reset_display_configCounter_tasks,
update_dal_configuration_tasks,
vari_bright_resume_tasks,
- block_adjust_power_state_tasks,
setup_asic_tasks,
enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
enable_dynamic_state_management_tasks,
enable_clock_power_gatings_tasks,
enable_disable_bapm_tasks,
initialize_thermal_controller_tasks,
- reset_boot_state_tasks,
+ get_2d_performance_state_tasks,
+ set_performance_state_tasks,
adjust_power_state_tasks,
enable_disable_fps_tasks,
notify_hw_power_source_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 51dedf84623c..89f31bc5b68b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -2389,6 +2389,7 @@ static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
for(count = 0; count < table->VceLevelCount; count++) {
table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
table->VceLevel[count].MinVoltage |=
(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
table->VceLevel[count].MinVoltage |=
@@ -2465,6 +2466,7 @@ static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
for (count = 0; count < table->SamuLevelCount; count++) {
/* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
VOLTAGE_SCALE) << VDDC_SHIFT;
@@ -2562,6 +2564,7 @@ static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
table->UvdBootLevel = 0;
for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
@@ -2900,6 +2903,8 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
fiji_populate_smc_voltage_tables(hwmgr, table);
+ table->SystemFlags = 0;
+
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition))
table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
@@ -2997,6 +3002,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->MemoryThermThrottleEnable = 1;
table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
result = fiji_populate_vr_config(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
@@ -5195,6 +5201,67 @@ static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
+static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
+ const struct fiji_performance_level *pl2)
+{
+ return ((pl1->memory_clock == pl2->memory_clock) &&
+ (pl1->engine_clock == pl2->engine_clock) &&
+ (pl1->pcie_gen == pl2->pcie_gen) &&
+ (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+ const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
+ const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
+ int i;
+
+ if (equal == NULL || psa == NULL || psb == NULL)
+ return -EINVAL;
+
+ /* If the two states don't even have the same number of performance levels they cannot be the same state. */
+ if (psa->performance_level_count != psb->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < psa->performance_level_count; i++) {
+ if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+ /* If we have found even one performance level pair that is different the states are different. */
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+ *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+ *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+ *equal &= (psa->sclk_threshold == psb->sclk_threshold);
+ *equal &= (psa->acp_clk == psb->acp_clk);
+
+ return 0;
+}
+
+bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ bool is_update_required = false;
+ struct cgs_display_info info = {0,0,NULL};
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (data->display_timing.num_existing_displays != info.display_count)
+ is_update_required = true;
+/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
+ if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
+ if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
+ is_update_required = true;
+*/
+ return is_update_required;
+}
+
+
static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.backend_init = &fiji_hwmgr_backend_init,
.backend_fini = &tonga_hwmgr_backend_fini,
@@ -5230,6 +5297,8 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
.set_fan_control_mode = fiji_set_fan_control_mode,
.get_fan_control_mode = fiji_get_fan_control_mode,
+ .check_states_equal = fiji_check_states_equal,
+ .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
.get_pp_table = fiji_get_pp_table,
.set_pp_table = fiji_set_pp_table,
.force_clock_level = fiji_force_clock_level,
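fiji_check_states_equal() above follows a common comparison shape: bail out as unequal when the performance-level counts differ, compare each level field by field, and only then use the auxiliary clocks as a tie-breaker. A condensed, self-contained version of that logic (struct layout simplified, not the driver's real types):

	#include <stdbool.h>
	#include <stdio.h>

	struct level { unsigned int mclk, sclk; };

	struct state {
		struct level levels[4];
		unsigned int count;
		unsigned int uvd_vclk, uvd_dclk;
	};

	static bool levels_equal(const struct level *a, const struct level *b)
	{
		return a->mclk == b->mclk && a->sclk == b->sclk;
	}

	static int states_equal(const struct state *a, const struct state *b, bool *equal)
	{
		if (!a || !b || !equal)
			return -1;

		if (a->count != b->count) {	/* different shapes: not equal */
			*equal = false;
			return 0;
		}

		for (unsigned int i = 0; i < a->count; i++) {
			if (!levels_equal(&a->levels[i], &b->levels[i])) {
				*equal = false;	/* one mismatch is enough */
				return 0;
			}
		}

		/* All levels match: let the UVD clocks break the tie. */
		*equal = (a->uvd_vclk == b->uvd_vclk) && (a->uvd_dclk == b->uvd_dclk);
		return 0;
	}

	int main(void)
	{
		struct state x = { .levels = { {800, 300} }, .count = 1,
				   .uvd_vclk = 1, .uvd_dclk = 2 };
		struct state y = x;
		bool eq;

		states_equal(&x, &y, &eq);
		printf("equal: %d\n", eq);
		return 0;
	}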
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index be31bed2538a..fa208ada6892 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -58,6 +58,9 @@ void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
@@ -130,18 +133,25 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
+ int ret = 1;
+ bool enabled;
PHM_FUNC_CHECK(hwmgr);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface)) {
if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
- return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
+ ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
} else {
- return phm_dispatch_table(hwmgr,
+ ret = phm_dispatch_table(hwmgr,
&(hwmgr->enable_dynamic_state_management),
NULL, NULL);
}
- return 0;
+
+ enabled = ret == 0 ? true : false;
+
+ cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+ return ret;
}
int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 56b829f97699..3ac1ae4d8caf 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -57,14 +57,13 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
DRM_ERROR("failed to map control registers area\n");
ret = PTR_ERR(hdlcd->mmio);
hdlcd->mmio = NULL;
- goto fail;
+ return ret;
}
version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
DRM_ERROR("unknown product id: 0x%x\n", version);
- ret = -EINVAL;
- goto fail;
+ return -EINVAL;
}
DRM_INFO("found ARM HDLCD version r%dp%d\n",
(version & HDLCD_VERSION_MAJOR_MASK) >> 8,
@@ -73,7 +72,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
/* Get the optional framebuffer memory resource */
ret = of_reserved_mem_device_init(drm->dev);
if (ret && ret != -ENODEV)
- goto fail;
+ return ret;
ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
if (ret)
@@ -101,8 +100,6 @@ irq_fail:
drm_crtc_cleanup(&hdlcd->crtc);
setup_fail:
of_reserved_mem_device_release(drm->dev);
-fail:
- devm_clk_put(drm->dev, hdlcd->clk);
return ret;
}
@@ -412,7 +409,6 @@ err_unload:
pm_runtime_put_sync(drm->dev);
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
- devm_clk_put(dev, hdlcd->clk);
err_free:
drm_dev_unref(drm);
@@ -436,10 +432,6 @@ static void hdlcd_drm_unbind(struct device *dev)
pm_runtime_put_sync(drm->dev);
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
- if (!IS_ERR(hdlcd->clk)) {
- devm_clk_put(drm->dev, hdlcd->clk);
- hdlcd->clk = NULL;
- }
drm_mode_config_cleanup(drm);
drm_dev_unregister(drm);
drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 6e731db31aa4..aca7f9cc6109 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
release:
for_each_sg(sgt->sgl, sg, num, i)
- page_cache_release(sg_page(sg));
+ put_page(sg_page(sg));
free_table:
sg_free_table(sgt);
free_sgt:
@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
if (dobj->obj.filp) {
struct scatterlist *sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
- page_cache_release(sg_page(sg));
+ put_page(sg_page(sg));
}
sg_free_table(sgt);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7d58f594cffe..df64ed1c0139 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -179,7 +179,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
{
struct drm_dp_aux_msg msg;
unsigned int retry;
- int err;
+ int err = 0;
memset(&msg, 0, sizeof(msg));
msg.address = offset;
@@ -187,6 +187,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
msg.buffer = buffer;
msg.size = size;
+ mutex_lock(&aux->hw_mutex);
+
/*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
@@ -195,25 +197,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
*/
for (retry = 0; retry < 32; retry++) {
- mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
- mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
- return err;
+ goto unlock;
}
switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
if (err < size)
- return -EPROTO;
- return err;
+ err = -EPROTO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_NACK:
- return -EIO;
+ err = -EIO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_DEFER:
usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -222,7 +223,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
DRM_DEBUG_KMS("too many retries, giving up\n");
- return -EIO;
+ err = -EIO;
+
+unlock:
+ mutex_unlock(&aux->hw_mutex);
+ return err;
}
/**
@@ -544,9 +549,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
- mutex_lock(&aux->hw_mutex);
ret = aux->transfer(aux, msg);
- mutex_unlock(&aux->hw_mutex);
if (ret < 0) {
if (ret == -EBUSY)
continue;
@@ -685,6 +688,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
memset(&msg, 0, sizeof(msg));
+ mutex_lock(&aux->hw_mutex);
+
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -739,6 +744,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
+ mutex_unlock(&aux->hw_mutex);
+
return err;
}
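The drm_dp_helper.c hunks above take aux->hw_mutex once around the whole retry loop (and around an entire i2c transfer) instead of once per attempt, and funnel every exit path through a single unlock label. A minimal pthread sketch of that lock-around-retries shape; transfer_once() is a made-up stand-in for the AUX transfer, not a DRM call:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t hw_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in transfer: fail twice, then succeed. */
	static int transfer_once(void)
	{
		static int attempts;
		return ++attempts < 3 ? -1 : 0;
	}

	static int do_transfer(void)
	{
		int err = -1;

		pthread_mutex_lock(&hw_mutex);	/* hold the lock across all retries */

		for (int retry = 0; retry < 32; retry++) {
			err = transfer_once();
			if (err == 0)
				goto unlock;	/* success and failure share one exit */
		}
		err = -1;			/* too many retries */

	unlock:
		pthread_mutex_unlock(&hw_mutex);
		return err;
	}

	int main(void)
	{
		printf("transfer returned %d\n", do_transfer());
		return 0;
	}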
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 414d7f61aa05..558ef9fc39e6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -205,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0f - 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 0x10 - 1024x768@60Hz */
@@ -522,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = {
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
+ 704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -539,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -2241,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
int i, j, m, modes = 0;
struct drm_display_mode *mode;
- u8 *est = ((u8 *)timing) + 5;
+ u8 *est = ((u8 *)timing) + 6;
for (i = 0; i < 6; i++) {
for (j = 7; j >= 0; j--) {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 2e8c77e71e1f..da0c5320789f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
fail:
while (i--)
- page_cache_release(pages[i]);
+ put_page(pages[i]);
drm_free_large(pages);
return ERR_CAST(p);
@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
mark_page_accessed(pages[i]);
/* Undo the reference we took when populating the table */
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
drm_free_large(pages);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f17d39279596..baddf33fb475 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -94,7 +94,7 @@ comment "Sub-drivers"
config DRM_EXYNOS_G2D
bool "G2D"
- depends on !VIDEO_SAMSUNG_S5P_G2D
+ depends on VIDEO_SAMSUNG_S5P_G2D=n
select FRAME_VECTOR
help
Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 968b31c522b2..23d2f958739b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -2,10 +2,10 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
- exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
- exynos_drm_plane.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
+ exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 7f55ba6771c6..011211e4167d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
return 0;
err:
- list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+ list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
if (subdrv->close)
subdrv->close(dev, subdrv->dev, file);
}
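The exynos_drm_core.c fix above matters because the error path must close only the subdrivers that were opened before the failure, walking back from the failing entry; the old list_for_each_entry_reverse() restarted from the wrong cursor. A compact userspace sketch of the same partial-unwind idea, using an array index in place of a list cursor:

	#include <stdio.h>

	#define NR_SUBDRV 4

	/* Pretend the third subdriver fails to open. */
	static int open_subdrv(int i)
	{
		if (i == 2)
			return -1;
		printf("opened %d\n", i);
		return 0;
	}

	static void close_subdrv(int i)
	{
		printf("closed %d\n", i);
	}

	static int open_all(void)
	{
		int i, ret = 0;

		for (i = 0; i < NR_SUBDRV; i++) {
			ret = open_subdrv(i);
			if (ret)
				break;
		}

		if (ret) {
			/* Unwind only what actually got opened, in reverse order. */
			while (--i >= 0)
				close_subdrv(i);
		}

		return ret;
	}

	int main(void)
	{
		return open_all() ? 1 : 0;
	}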
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d614194644c8..81cc5537cf25 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -199,17 +199,6 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
return exynos_fb->dma_addr[index];
}
-static void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_fb_helper *fb_helper = private->fb_helper;
-
- if (fb_helper)
- drm_fb_helper_hotplug_event(fb_helper);
- else
- exynos_drm_fbdev_init(dev);
-}
-
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4ae860c44f1d..72d7c0b7c216 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -317,3 +317,14 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
}
+
+void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
+ else
+ exynos_drm_fbdev_init(dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index e16d7f0ae192..330eef87f718 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -15,9 +15,30 @@
#ifndef _EXYNOS_DRM_FBDEV_H_
#define _EXYNOS_DRM_FBDEV_H_
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
int exynos_drm_fbdev_init(struct drm_device *dev);
-int exynos_drm_fbdev_reinit(struct drm_device *dev);
void exynos_drm_fbdev_fini(struct drm_device *dev);
void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+void exynos_drm_output_poll_changed(struct drm_device *dev);
+
+#else
+
+static inline int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+#define exynos_drm_output_poll_changed (NULL)
+
+#endif
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 51d484ae9f49..018449f8d557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -888,7 +888,7 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
* clock. On these SoCs the bootloader may enable it but any
* power domain off/on will reset it to disable state.
*/
- if (ctx->driver_data != &exynos5_fimd_driver_data ||
+ if (ctx->driver_data != &exynos5_fimd_driver_data &&
ctx->driver_data != &exynos5420_fimd_driver_data)
return;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 9869d70e9e54..a0def0be6d65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -129,7 +129,7 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
} else
val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
- regmap_write(mic->sysreg, DSD_CFG_MUX, val);
+ ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
if (ret)
DRM_ERROR("mic: Failed to read system register\n");
}
@@ -457,6 +457,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
"samsung,disp-syscon");
if (IS_ERR(mic->sysreg)) {
DRM_ERROR("mic: Failed to get system register.\n");
+ ret = PTR_ERR(mic->sysreg);
goto err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index d86227236f55..50185ac347b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -11,9 +11,10 @@
#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
}
static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
-
{
struct drm_plane_state *state = &exynos_state->base;
- struct drm_crtc *crtc = exynos_state->base.crtc;
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_existing_crtc_state(state->state, crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
unsigned int src_x, src_y;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 7bb1f1aff932..c52f9adf5e04 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
* FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
* be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
*/
-static int __deprecated
+static int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 20e82008b8b6..30798cbc6fc0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -758,10 +758,10 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_display_resume(dev);
-
intel_dp_mst_resume(dev);
+ intel_display_resume(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3d31d3ac589e..dabc08987b5e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(src);
- page_cache_release(page);
+ put_page(page);
vaddr += PAGE_SIZE;
}
@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
vaddr += PAGE_SIZE;
}
obj->dirty = 0;
@@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
obj->dirty = 0;
@@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
sg_mark_end(sg);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
- page_cache_release(sg_page_iter_page(&sg_iter));
+ put_page(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 6be40f3ba2c7..18ba8139e922 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -683,7 +683,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
obj->dirty = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef5ab3f..1c212205d0e7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1829,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- for (;;) {
+ do {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
@@ -1857,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
- }
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a2bd698fe2f7..937e77228466 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -506,6 +506,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+ intel_connector->unregister(intel_connector);
+
/* need to nuke the connector */
drm_modeset_lock_all(dev);
if (connector->state->crtc) {
@@ -519,11 +521,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
WARN(ret, "Disabling mst crtc failed with %i\n", ret);
}
- drm_modeset_unlock_all(dev);
- intel_connector->unregister(intel_connector);
-
- drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30a8403a8f4f..cd9fe609aefb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -478,11 +478,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
- if (!HAS_PCH_SPLIT(dev)) {
- drm_modeset_lock_all(dev);
+ if (!HAS_PCH_SPLIT(dev))
intel_display_resume(dev);
- drm_modeset_unlock_all(dev);
- }
dev_priv->modeset_restore = MODESET_DONE;
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 2a95d10e9d92..a24631fdf4ad 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -225,8 +225,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
if (!iores)
return -ENXIO;
- platform_set_drvdata(pdev, hdmi);
-
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
@@ -245,7 +243,16 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
- return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+ ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+ /*
+ * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+ * which would have called the encoder cleanup. Do it manually.
+ */
+ if (ret)
+ drm_encoder_cleanup(encoder);
+
+ return ret;
}
static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
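The hunk above plugs a leak on the bind error path: once drm_encoder_init() has run, a failing dw_hdmi_bind() means dw_hdmi_imx_unbind(), and the encoder cleanup it performs, will never be called, so the encoder must be torn down on the spot. A minimal standalone sketch of that ownership rule (made-up names and return code, not driver code):

/*
 * Standalone sketch of the error-path ownership pattern above;
 * names are made up, this is not driver code.
 */
#include <stdio.h>

static int encoder_inited;

static void encoder_init(void)    { encoder_inited = 1; }
static void encoder_cleanup(void) { encoder_inited = 0; }

/* stands in for dw_hdmi_bind(); pretend it fails */
static int core_bind(void) { return -19; /* -ENODEV */ }

static int component_bind(void)
{
	int ret;

	encoder_init();
	ret = core_bind();
	/*
	 * If core_bind() fails, the matching unbind callback never runs,
	 * so the encoder must be cleaned up right here.
	 */
	if (ret)
		encoder_cleanup();
	return ret;
}

int main(void)
{
	int ret = component_bind();

	printf("bind=%d, encoder still initialised: %d\n", ret, encoder_inited);
	return 0;
}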
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9876e0f0c3e1..e26dcdec2aba 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -326,7 +326,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
{
struct imx_drm_device *imxdrm = drm->dev_private;
struct imx_drm_crtc *imx_drm_crtc;
- int ret;
/*
* The vblank arrays are dimensioned by MAX_CRTC - we can't
@@ -351,10 +350,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
*new_crtc = imx_drm_crtc;
- ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
- if (ret)
- goto err_register;
-
drm_crtc_helper_add(crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
@@ -362,11 +357,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
return 0;
-
-err_register:
- imxdrm->crtc[--imxdrm->pipes] = NULL;
- kfree(imx_drm_crtc);
- return ret;
}
EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 588827844f30..681ec6eb77d9 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -72,22 +72,101 @@ static inline int calc_bandwidth(int width, int height, unsigned int vref)
int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_cma_object *cma_obj;
- unsigned long eba;
- int active;
-
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
- DRM_DEBUG_KMS("entry is null.\n");
- return -EFAULT;
+ struct drm_gem_cma_object *cma_obj[3];
+ unsigned long eba, ubo, vbo;
+ int active, i;
+
+ for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+ cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
+ if (!cma_obj[i]) {
+ DRM_DEBUG_KMS("plane %d entry is null.\n", i);
+ return -EFAULT;
+ }
}
- dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
- &cma_obj->paddr, x, y);
-
- eba = cma_obj->paddr + fb->offsets[0] +
+ eba = cma_obj[0]->paddr + fb->offsets[0] +
fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
+ if (eba & 0x7) {
+ DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
+ return -EINVAL;
+ }
+
+ if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
+ DRM_DEBUG_KMS("pitches out of range.\n");
+ return -EINVAL;
+ }
+
+ if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
+ DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
+ return -EINVAL;
+ }
+
+ ipu_plane->stride[0] = fb->pitches[0];
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ /*
+ * Multiplanar formats have to meet the following restrictions:
+ * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+ * - EBA, UBO and VBO are a multiple of 8
+ * - UBO and VBO are unsigned and not larger than 0xfffff8
+ * - Only EBA may be changed while scanout is active
+ * - The strides of U and V planes must be identical.
+ */
+ ubo = cma_obj[1]->paddr + fb->offsets[1] +
+ fb->pitches[1] * y / 2 + x / 2 - eba;
+ vbo = cma_obj[2]->paddr + fb->offsets[2] +
+ fb->pitches[2] * y / 2 + x / 2 - eba;
+
+ if ((ubo & 0x7) || (vbo & 0x7)) {
+ DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
+ return -EINVAL;
+ }
+
+ if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
+ DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
+ return -EINVAL;
+ }
+
+ if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
+ (ipu_plane->v_offset != vbo))) {
+ DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
+ return -EINVAL;
+ }
+
+ if (fb->pitches[1] != fb->pitches[2]) {
+ DRM_DEBUG_KMS("U/V pitches must be identical.\n");
+ return -EINVAL;
+ }
+
+ if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
+ DRM_DEBUG_KMS("U/V pitches out of range.\n");
+ return -EINVAL;
+ }
+
+ if (ipu_plane->enabled &&
+ (ipu_plane->stride[1] != fb->pitches[1])) {
+ DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
+ return -EINVAL;
+ }
+
+ ipu_plane->u_offset = ubo;
+ ipu_plane->v_offset = vbo;
+ ipu_plane->stride[1] = fb->pitches[1];
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phys = %pad %pad %pad, x = %d, y = %d",
+ &cma_obj[0]->paddr, &cma_obj[1]->paddr,
+ &cma_obj[2]->paddr, x, y);
+ break;
+ default:
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
+ &cma_obj[0]->paddr, x, y);
+ break;
+ }
+
if (ipu_plane->enabled) {
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
@@ -201,12 +280,6 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
}
}
- ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
- if (ret) {
- dev_err(dev, "initializing dmfc channel failed with %d\n", ret);
- return ret;
- }
-
ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
calc_bandwidth(crtc_w, crtc_h,
calc_vref(mode)), 64);
@@ -215,6 +288,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
return ret;
}
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
+
ipu_cpmem_zero(ipu_plane->ipu_ch);
ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
@@ -233,6 +308,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
if (interlaced)
ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
+ if (fb->pixel_format == DRM_FORMAT_YUV420) {
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ ipu_plane->stride[1],
+ ipu_plane->u_offset,
+ ipu_plane->v_offset);
+ } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ ipu_plane->stride[1],
+ ipu_plane->v_offset,
+ ipu_plane->u_offset);
+ }
+
ipu_plane->w = src_w;
ipu_plane->h = src_h;
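For the multiplanar path added above, EBA points at the Y plane while UBO and VBO are offsets of the U and V planes relative to EBA; all three must be 8-byte aligned and UBO/VBO may not exceed 0xfffff8. A rough userspace sketch of that arithmetic for a YUV420 buffer (addresses, pitches and sizes are made up, not driver code):

#include <stdio.h>

int main(void)
{
	/* made-up contiguous CMA buffer holding Y, U, V planes back to back */
	unsigned int paddr = 0x40000000, width = 1920, height = 1080;
	unsigned int pitch_y = width, pitch_uv = width / 2;
	unsigned int off_u = pitch_y * height;                /* U follows Y */
	unsigned int off_v = off_u + pitch_uv * (height / 2); /* V follows U */
	unsigned int x = 0, y = 0;                            /* pan offsets  */

	unsigned int eba = paddr + pitch_y * y + x;           /* 8bpp Y plane */
	unsigned int ubo = paddr + off_u + pitch_uv * y / 2 + x / 2 - eba;
	unsigned int vbo = paddr + off_v + pitch_uv * y / 2 + x / 2 - eba;

	/* same constraints the patch checks before programming the channel */
	int ok = !(eba & 7) && !(ubo & 7) && !(vbo & 7) &&
		 ubo <= 0xfffff8 && vbo <= 0xfffff8;

	printf("eba=%#x ubo=%#x vbo=%#x ok=%d\n", eba, ubo, vbo, ok);
	return 0;
}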
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 3a443b413c60..4448fd4ad4eb 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -29,6 +29,10 @@ struct ipu_plane {
int w;
int h;
+ unsigned int u_offset;
+ unsigned int v_offset;
+ unsigned int stride[2];
+
bool enabled;
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index b04a64664673..65428cf233ce 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -196,7 +196,7 @@ void __exit msm_hdmi_phy_driver_unregister(void);
int msm_hdmi_pll_8960_init(struct platform_device *pdev);
int msm_hdmi_pll_8996_init(struct platform_device *pdev);
#else
-static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev);
+static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
{
return -ENODEV;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d52910e2c26c..c03b96709179 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -467,9 +467,6 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
struct msm_file_private *ctx = file->driver_priv;
struct msm_kms *kms = priv->kms;
- if (kms)
- kms->funcs->preclose(kms, file);
-
mutex_lock(&dev->struct_mutex);
if (ctx == priv->lastctx)
priv->lastctx = NULL;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9bcabaada179..e32222c3d44f 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -55,7 +55,6 @@ struct msm_kms_funcs {
struct drm_encoder *slave_encoder,
bool is_cmd_mode);
/* cleanup: */
- void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*destroy)(struct msm_kms *kms);
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 16641cec18a2..b5370cb56e3c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -11,6 +11,7 @@ struct nvkm_device_tegra {
struct reset_control *rst;
struct clk *clk;
+ struct clk *clk_ref;
struct clk *clk_pwr;
struct regulator *vdd;
@@ -36,6 +37,10 @@ struct nvkm_device_tegra_func {
* bypassed). A value of 0 means an IOMMU is never used.
*/
u8 iommu_bit;
+ /*
+ * Whether the chip requires a reference clock
+ */
+ bool require_ref_clk;
};
int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 2dfe58af12e4..4c4cc2260257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -55,6 +55,11 @@ static const struct nvkm_device_tegra_func gk20a_platform_data = {
.iommu_bit = 34,
};
+static const struct nvkm_device_tegra_func gm20b_platform_data = {
+ .iommu_bit = 34,
+ .require_ref_clk = true,
+};
+
static const struct of_device_id nouveau_platform_match[] = {
{
.compatible = "nvidia,gk20a",
@@ -62,7 +67,7 @@ static const struct of_device_id nouveau_platform_match[] = {
},
{
.compatible = "nvidia,gm20b",
- .data = &gk20a_platform_data,
+ .data = &gm20b_platform_data,
},
{ }
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 9afa5f3e3c1c..ec12efb4689a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -35,6 +35,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
ret = clk_prepare_enable(tdev->clk);
if (ret)
goto err_clk;
+ if (tdev->clk_ref) {
+ ret = clk_prepare_enable(tdev->clk_ref);
+ if (ret)
+ goto err_clk_ref;
+ }
ret = clk_prepare_enable(tdev->clk_pwr);
if (ret)
goto err_clk_pwr;
@@ -57,6 +62,9 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
err_clamp:
clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
+ if (tdev->clk_ref)
+ clk_disable_unprepare(tdev->clk_ref);
+err_clk_ref:
clk_disable_unprepare(tdev->clk);
err_clk:
regulator_disable(tdev->vdd);
@@ -71,6 +79,8 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
udelay(10);
clk_disable_unprepare(tdev->clk_pwr);
+ if (tdev->clk_ref)
+ clk_disable_unprepare(tdev->clk_ref);
clk_disable_unprepare(tdev->clk);
udelay(10);
@@ -274,6 +284,13 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
goto free;
}
+ if (func->require_ref_clk)
+ tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
+ if (IS_ERR(tdev->clk_ref)) {
+ ret = PTR_ERR(tdev->clk_ref);
+ goto free;
+ }
+
tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
if (IS_ERR(tdev->clk_pwr)) {
ret = PTR_ERR(tdev->clk_pwr);
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 43e5f503d1c5..030409a3ee4e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(user_bo);
+ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+ qcrtc->hot_spot_x = hot_x;
+ qcrtc->hot_spot_y = hot_y;
+
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = qcrtc->cur_x;
- cmd->u.set.position.y = qcrtc->cur_y;
+ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = qcrtc->cur_x;
- cmd->u.position.y = qcrtc->cur_y;
+ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
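The qxl change above keeps cur_x/cur_y as the top-left corner of the cursor image and re-bases them whenever the hotspot changes, so the point handed to the device (cur + hot_spot) stays where the user left it. A standalone sketch of that invariant (values are made up, not driver code):

/*
 * Shows why qxl_crtc_cursor_set2() re-bases cur_x/cur_y when the
 * cursor hotspot changes.  Numbers are arbitrary.
 */
#include <assert.h>
#include <stdio.h>

struct cursor_state {
	int cur_x, cur_y;           /* top-left of the cursor image */
	int hot_spot_x, hot_spot_y; /* hotspot offset inside the image */
};

/* what the device is told: the hotspot position on screen */
static int device_x(const struct cursor_state *c) { return c->cur_x + c->hot_spot_x; }
static int device_y(const struct cursor_state *c) { return c->cur_y + c->hot_spot_y; }

/* mirrors the arithmetic added in the hunk above */
static void set_hotspot(struct cursor_state *c, int hot_x, int hot_y)
{
	c->cur_x += c->hot_spot_x - hot_x;
	c->cur_y += c->hot_spot_y - hot_y;
	c->hot_spot_x = hot_x;
	c->hot_spot_y = hot_y;
}

int main(void)
{
	struct cursor_state c = { .cur_x = 100, .cur_y = 50,
				  .hot_spot_x = 4, .hot_spot_y = 4 };
	int on_screen_x = device_x(&c), on_screen_y = device_y(&c);

	set_hotspot(&c, 12, 2); /* new cursor image with a different hotspot */

	/* the point the user sees as "the cursor" has not moved */
	assert(device_x(&c) == on_screen_x && device_y(&c) == on_screen_y);
	printf("hotspot stays at (%d,%d)\n", device_x(&c), device_y(&c));
	return 0;
}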
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 6e6b9b1519b8..3f3897eb458c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@ struct qxl_crtc {
int index;
int cur_x;
int cur_y;
+ int hot_spot_x;
+ int hot_spot_y;
};
struct qxl_output {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index cf61e0856f4a..b80b08f71cb4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -275,13 +275,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
- drm_vblank_on(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_on(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_off(dev, radeon_crtc->crtc_id);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
index da310a70c0f0..827ccc87cbc3 100644
--- a/drivers/gpu/drm/radeon/ni_reg.h
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -109,6 +109,8 @@
#define NI_DP_MSE_SAT2 0x7398
#define NI_DP_MSE_SAT_UPDATE 0x739c
+# define NI_DP_MSE_SAT_UPDATE_MASK 0x3
+# define NI_DP_MSE_16_MTP_KEEPOUT 0x100
#define NI_DIG_BE_CNTL 0x7140
# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 43cffb526b0c..de504ea29c06 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -89,8 +89,16 @@ static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
do {
+ unsigned value1, value2;
+ udelay(10);
temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
- } while ((temp & 0x1) && retries++ < 10000);
+
+ value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
+ value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
+
+ if (!value1 && !value2)
+ break;
+ } while (retries++ < 50);
if (retries == 10000)
DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
@@ -150,7 +158,7 @@ static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn
return 0;
}
-static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
{
struct drm_device *dev = mst->base.dev;
struct radeon_device *rdev = dev->dev_private;
@@ -158,6 +166,8 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
uint32_t val, temp;
uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
int retries = 0;
+ uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
+ uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
@@ -165,6 +175,7 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
do {
temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+ udelay(10);
} while ((temp & 0x1) && (retries++ < 10000));
if (retries >= 10000)
@@ -246,14 +257,8 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(radeon_connector);
}
-static int radeon_connector_dpms(struct drm_connector *connector, int mode)
-{
- DRM_DEBUG_KMS("\n");
- return 0;
-}
-
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
- .dpms = radeon_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy,
@@ -394,7 +399,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
int ret, slots;
-
+ s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
if (!ASIC_IS_DCE5(rdev)) {
DRM_ERROR("got mst dpms on non-DCE5\n");
return;
@@ -456,7 +461,11 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
mst_enc->enc_active = true;
radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
- radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0);
+
+ fixed_pbn = drm_int2fixp(mst_enc->pbn);
+ fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
+ avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
+ radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
mst_enc->fe);
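The VCP size is now passed to radeon_dp_mst_set_vcp_size() as a 32.32 fixed-point average of time slots per MTP (pbn / pbn_div), which the function splits into an integer part x and a fractional part y expressed in 1/2^26 units, rounded up. As a rough illustration only (pbn and pbn_div values are made up and the drm_fixp helpers are bypassed), the same split can be computed directly:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pbn = 2560, pbn_div = 60;   /* made-up MST stream/link numbers */
	uint32_t x = pbn / pbn_div;          /* whole time slots per MTP */
	uint64_t rem = pbn % pbn_div;
	/* fractional part in 1/2^26 units, rounded up like drm_fixp2int_ceil() */
	uint32_t y = (uint32_t)(((rem << 26) + pbn_div - 1) / pbn_div);

	printf("avg_time_slots_per_mtp = %u + %u/2^26\n", x, y);
	return 0;
}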
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 979f3bf65f2c..1e9304d1c88f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -291,6 +291,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
if (r) {
return r;
}
+ rdev->ddev->vblank_disable_allowed = true;
+
/* enable msi */
rdev->msi_enabled = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 24152dfef199..478d4099b0d0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -331,13 +331,15 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
}
- drm_vblank_on(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_on(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_off(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index dd46c38676db..2d901bf28a94 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -799,6 +799,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if ((offset + size) <= rdev->mc.visible_vram_size)
return 0;
+ /* Can't move a pinned BO to visible VRAM */
+ if (rbo->pin_count > 0)
+ return -EINVAL;
+
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6d8c32377c6f..7dddfdce85e6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -397,9 +397,15 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
+ struct radeon_bo *rbo;
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
+ /* Can't move a pinned BO */
+ rbo = container_of(bo, struct radeon_bo, tbo);
+ if (WARN_ON_ONCE(rbo->pin_count > 0))
+ return -EINVAL;
+
rdev = radeon_get_rdev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
radeon_move_null(bo, new_mem);
@@ -609,7 +615,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index cb75ab72098a..af4df81c4e0c 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,9 +2926,11 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
{ 0, 0, 0, 0 },
};
@@ -3008,6 +3010,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
+ /* limit mclk on all R7 370 parts for stability */
+ if (rdev->pdev->device == 0x6811 &&
+ rdev->pdev->revision == 0x81)
+ max_mclk = 120000;
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 3d3cf2f8891e..d5cfef75fc80 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -271,8 +271,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
if (!iores)
return -ENXIO;
- platform_set_drvdata(pdev, hdmi);
-
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
@@ -293,7 +291,16 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
- return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+ ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+ /*
+ * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+ * which would have called the encoder cleanup. Do it manually.
+ */
+ if (ret)
+ drm_encoder_cleanup(encoder);
+
+ return ret;
}
static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 896da09e49ee..f556a8f4fde6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -251,6 +251,27 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
return 0;
}
+static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
+ struct drm_file *file_priv)
+{
+ struct rockchip_drm_private *priv = crtc->dev->dev_private;
+ int pipe = drm_crtc_index(crtc);
+
+ if (pipe < ROCKCHIP_MAX_CRTC &&
+ priv->crtc_funcs[pipe] &&
+ priv->crtc_funcs[pipe]->cancel_pending_vblank)
+ priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
+}
+
+static void rockchip_drm_preclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
+}
+
void rockchip_drm_lastclose(struct drm_device *dev)
{
struct rockchip_drm_private *priv = dev->dev_private;
@@ -281,6 +302,7 @@ static struct drm_driver rockchip_drm_driver = {
DRIVER_PRIME | DRIVER_ATOMIC,
.load = rockchip_drm_load,
.unload = rockchip_drm_unload,
+ .preclose = rockchip_drm_preclose,
.lastclose = rockchip_drm_lastclose,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rockchip_drm_crtc_enable_vblank,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 3529f692edb8..00d17d71aa4c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,6 +40,7 @@ struct rockchip_crtc_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
void (*wait_for_update)(struct drm_crtc *crtc);
+ void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
};
struct rockchip_atomic_commit {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fd370548d7d7..a619f120f801 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -499,10 +499,25 @@ err_disable_hclk:
static void vop_crtc_disable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
+ int i;
if (!vop->is_enabled)
return;
+ /*
+ * We need to make sure that all windows are disabled before we
+ * disable that crtc. Otherwise we might try to scan from a destroyed
+ * buffer later.
+ */
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win = vop_win->data;
+
+ spin_lock(&vop->reg_lock);
+ VOP_WIN_SET(vop, win, enable, 0);
+ spin_unlock(&vop->reg_lock);
+ }
+
drm_crtc_vblank_off(crtc);
/*
@@ -549,6 +564,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb = state->fb;
struct vop_win *vop_win = to_vop_win(plane);
struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
@@ -563,12 +579,13 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
DRM_PLANE_HELPER_NO_SCALING;
- crtc = crtc ? crtc : plane->state->crtc;
- /*
- * Both crtc or plane->state->crtc can be null.
- */
if (!crtc || !fb)
goto out_disable;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
src->x1 = state->src_x;
src->y1 = state->src_y;
src->x2 = state->src_x + state->src_w;
@@ -580,8 +597,8 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
clip.x1 = 0;
clip.y1 = 0;
- clip.x2 = crtc->mode.hdisplay;
- clip.y2 = crtc->mode.vdisplay;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
ret = drm_plane_helper_check_update(plane, crtc, state->fb,
src, dest, &clip,
@@ -873,10 +890,30 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
}
+static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
+ struct drm_file *file_priv)
+{
+ struct drm_device *drm = crtc->dev;
+ struct vop *vop = to_vop(crtc);
+ struct drm_pending_vblank_event *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ e = vop->event;
+ if (e && e->base.file_priv == file_priv) {
+ vop->event = NULL;
+
+ e->base.destroy(&e->base);
+ file_priv->event_space += sizeof(e->event);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
static const struct rockchip_crtc_funcs private_crtc_funcs = {
.enable_vblank = vop_crtc_enable_vblank,
.disable_vblank = vop_crtc_disable_vblank,
.wait_for_update = vop_crtc_wait_for_update,
+ .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
};
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -885,9 +922,6 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
{
struct vop *vop = to_vop(crtc);
- if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
- return false;
-
adjusted_mode->clock =
clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
@@ -1108,7 +1142,7 @@ static int vop_create_crtc(struct vop *vop)
const struct vop_data *vop_data = vop->data;
struct device *dev = vop->dev;
struct drm_device *drm_dev = vop->drm_dev;
- struct drm_plane *primary = NULL, *cursor = NULL, *plane;
+ struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
struct drm_crtc *crtc = &vop->crtc;
struct device_node *port;
int ret;
@@ -1148,7 +1182,7 @@ static int vop_create_crtc(struct vop *vop)
ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
&vop_crtc_funcs, NULL);
if (ret)
- return ret;
+ goto err_cleanup_planes;
drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
@@ -1181,6 +1215,7 @@ static int vop_create_crtc(struct vop *vop)
if (!port) {
DRM_ERROR("no port node found in %s\n",
dev->of_node->full_name);
+ ret = -ENOENT;
goto err_cleanup_crtc;
}
@@ -1194,7 +1229,8 @@ static int vop_create_crtc(struct vop *vop)
err_cleanup_crtc:
drm_crtc_cleanup(crtc);
err_cleanup_planes:
- list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
+ list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
+ head)
drm_plane_cleanup(plane);
return ret;
}
@@ -1202,9 +1238,28 @@ err_cleanup_planes:
static void vop_destroy_crtc(struct vop *vop)
{
struct drm_crtc *crtc = &vop->crtc;
+ struct drm_device *drm_dev = vop->drm_dev;
+ struct drm_plane *plane, *tmp;
rockchip_unregister_crtc_funcs(crtc);
of_node_put(crtc->port);
+
+ /*
+ * We need to cleanup the planes now. Why?
+ *
+ * The planes are "&vop->win[i].base". That means the memory is
+ * all part of the big "struct vop" chunk of memory. That memory
+ * was devm allocated and associated with this component. We need to
+ * free it ourselves before vop_unbind() finishes.
+ */
+ list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
+ head)
+ vop_plane_destroy(plane);
+
+ /*
+ * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
+ * references the CRTC.
+ */
drm_crtc_cleanup(crtc);
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 4e19d0f9cc30..077ae9b2865d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
goto out_err;
copy_highpage(to_page, from_page);
- page_cache_release(from_page);
+ put_page(from_page);
}
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
copy_highpage(to_page, from_page);
set_page_dirty(to_page);
mark_page_accessed(to_page);
- page_cache_release(to_page);
+ put_page(to_page);
}
ttm_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 33239a2b264a..fd1eb9d03f0b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -536,7 +536,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_gfree:
- drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 2a0a784ab6ee..d7528e0d8442 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
return ret;
}
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
*handle_p = handle;
return 0;
}
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index e797dfc07ae3..7e2a12c4fed2 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
if (NULL != (page = vsg->pages[i])) {
if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
- page_cache_release(page);
+ put_page(page);
}
}
case dr_via_pages_alloc:
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 883a314cd83a..6494a4d28171 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -395,60 +395,48 @@ void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
- u32 pixel_format, int stride,
- int u_offset, int v_offset)
+ unsigned int uv_stride,
+ unsigned int u_offset, unsigned int v_offset)
{
- switch (pixel_format) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YUV422P:
- ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
- break;
- case V4L2_PIX_FMT_YVU420:
- ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
- break;
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
- ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
- break;
- }
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
u32 pixel_format, int stride, int height)
{
- int u_offset, v_offset;
+ int fourcc, u_offset, v_offset;
int uv_stride = 0;
- switch (pixel_format) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
+ fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
+ switch (fourcc) {
+ case DRM_FORMAT_YUV420:
uv_stride = stride / 2;
u_offset = stride * height;
v_offset = u_offset + (uv_stride * height / 2);
- ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
- u_offset, v_offset);
break;
- case V4L2_PIX_FMT_YUV422P:
+ case DRM_FORMAT_YVU420:
+ uv_stride = stride / 2;
+ v_offset = stride * height;
+ u_offset = v_offset + (uv_stride * height / 2);
+ break;
+ case DRM_FORMAT_YUV422:
uv_stride = stride / 2;
u_offset = stride * height;
v_offset = u_offset + (uv_stride * height);
- ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
- u_offset, v_offset);
break;
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ uv_stride = stride;
u_offset = stride * height;
- ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
- u_offset, 0);
+ v_offset = 0;
break;
+ default:
+ return;
}
+ ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
@@ -684,17 +672,25 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = U_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
v_offset = V_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
+ case V4L2_PIX_FMT_YVU420:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = U_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = V_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+ v_offset, u_offset);
+ break;
case V4L2_PIX_FMT_YUV422P:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = U2_OFFSET(pix, image->rect.left,
@@ -702,8 +698,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
v_offset = V2_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV12:
@@ -712,8 +707,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
image->rect.top) - offset;
v_offset = 0;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV16:
@@ -722,8 +716,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
image->rect.top) - offset;
v_offset = 0;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_UYVY:
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 042c3958e2a0..837b1ec22800 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -350,11 +350,13 @@ out:
}
EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
u32 dmfc_gen1;
+ mutex_lock(&priv->mutex);
+
dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
@@ -364,9 +366,9 @@ int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
- return 0;
+ mutex_unlock(&priv->mutex);
}
-EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel);
+EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
{
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index bdb8cc89cacc..4f9c5c6deaed 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1979,6 +1979,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5c0e43ed5c53..c6eaff5f8845 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -676,6 +676,7 @@
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048
#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
#define USB_DEVICE_ID_MS_NE4K 0x00db
#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc
#define USB_DEVICE_ID_MS_LK6K 0x00f9
@@ -683,6 +684,8 @@
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
#define USB_DEVICE_ID_MS_NE7K 0x071d
#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600 0x0750
#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
#define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 0125e356bd8d..1ac4ff4d57a6 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -184,21 +184,31 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
unsigned char byte2, unsigned char byte3)
{
int ret;
- unsigned char buf[] = {0x18, byte2, byte3};
+ unsigned char *buf;
+
+ buf = kzalloc(3, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = 0x18;
+ buf[1] = byte2;
+ buf[2] = byte3;
switch (hdev->product) {
case USB_DEVICE_ID_LENOVO_CUSBKBD:
- ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf),
+ ret = hid_hw_raw_request(hdev, 0x13, buf, 3,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
break;
case USB_DEVICE_ID_LENOVO_CBTKBD:
- ret = hid_hw_output_report(hdev, buf, sizeof(buf));
+ ret = hid_hw_output_report(hdev, buf, 3);
break;
default:
ret = -EINVAL;
break;
}
+ kfree(buf);
+
return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */
}
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 75cd3bc59c54..e924d555536c 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -272,6 +272,12 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_PRESENTER },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
.driver_data = MS_ERGONOMY | MS_RDESC_3K },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
+ .driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
+ .driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
+ .driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 25d3c4330bf6..c741f5e50a66 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1169,6 +1169,7 @@ static void mt_release_contacts(struct hid_device *hid)
MT_TOOL_FINGER,
false);
}
+ input_mt_sync_frame(input_dev);
input_sync(input_dev);
}
}
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 4390eee2ce84..c830ed39348f 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -2049,9 +2049,11 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
* -----+------------------------------+-----+-----+
* The single bits Yaw, Roll, Pitch in the lower right corner specify
* whether the wiimote is rotating fast (0) or slow (1). Speed for slow
- * roation is 440 deg/s and for fast rotation 2000 deg/s. To get a
- * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
- * and 9 for slow.
+ * rotation is 8192/440 units / deg/s and for fast rotation 8192/2000
+ * units / deg/s. To get a linear scale for fast rotation we multiply
+ * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
+ * previous scale reported by this driver.
+ * This leaves a linear scale with 8192*9/440 (~167.564) units / deg/s.
* If the wiimote is not rotating the sensor reports 2^13 = 8192.
* Ext specifies whether an extension is connected to the motionp.
* which is parsed by wiimote-core.
@@ -2070,15 +2072,15 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
z -= 8192;
if (!(ext[3] & 0x02))
- x *= 18;
+ x = (x * 2000 * 9) / 440;
else
x *= 9;
if (!(ext[4] & 0x02))
- y *= 18;
+ y = (y * 2000 * 9) / 440;
else
y *= 9;
if (!(ext[3] & 0x01))
- z *= 18;
+ z = (z * 2000 * 9) / 440;
else
z *= 9;
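With the rescaling above, a fast-range sample is multiplied by 2000*9/440 while a slow-range sample is multiplied by 9, so both ranges end up on a common scale of 8192*9/440 (about 167.56) units per deg/s. A small standalone sketch of that conversion (raw samples are made up, not driver code):

#include <stdio.h>

static int mp_to_common_scale(int raw, int fast)
{
	int v = raw - 8192;                  /* 8192 means "not rotating" */

	if (fast)
		return (v * 2000 * 9) / 440; /* fast range: 8192/2000 units per deg/s */
	return v * 9;                        /* slow range: 8192/440 units per deg/s */
}

int main(void)
{
	/* made-up samples, each roughly 1 deg/s in its range */
	int slow = mp_to_common_scale(8192 + 19, 0);
	int fast = mp_to_common_scale(8192 + 4, 1);

	printf("slow: %d, fast: %d (both near ~167.56 units per deg/s)\n",
	       slow, fast);
	return 0;
}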
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ad71160b9ea4..ae83af649a60 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
return ret;
}
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
- if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
- usbhid_restart_out_queue(usbhid);
- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
- usbhid_restart_ctrl_queue(usbhid);
-}
-
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
usb_kill_urb(usbhid->urbout);
}
+static void hid_restart_io(struct hid_device *hid)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+ int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+ spin_lock_irq(&usbhid->lock);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ usbhid_mark_busy(usbhid);
+
+ if (clear_halt || reset_pending)
+ schedule_work(&usbhid->reset_work);
+ usbhid->retry_delay = 0;
+ spin_unlock_irq(&usbhid->lock);
+
+ if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+ return;
+
+ if (!clear_halt) {
+ if (hid_start_in(hid) < 0)
+ hid_io_error(hid);
+ }
+
+ spin_lock_irq(&usbhid->lock);
+ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+ usbhid_restart_out_queue(usbhid);
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
+ spin_unlock_irq(&usbhid->lock);
+}
+
/* Treat USB reset pretty much the same as suspend/resume */
static int hid_pre_reset(struct usb_interface *intf)
{
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
return 1;
}
+ /* No need to do another reset or clear a halted endpoint */
spin_lock_irq(&usbhid->lock);
clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+ clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_restart_queues(usbhid);
+
+ hid_restart_io(hid);
return 0;
}
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
{
- struct usbhid_device *usbhid = hid->driver_data;
- int status;
-
- spin_lock_irq(&usbhid->lock);
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
- usbhid_mark_busy(usbhid);
-
- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
- test_bit(HID_RESET_PENDING, &usbhid->iofl))
- schedule_work(&usbhid->reset_work);
- usbhid->retry_delay = 0;
-
- usbhid_restart_queues(usbhid);
- spin_unlock_irq(&usbhid->lock);
-
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
+ int status = 0;
+ hid_restart_io(hid);
if (driver_suspended && hid->driver && hid->driver->resume)
status = hid->driver->resume(hid);
return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
static int hid_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata (intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- if (!test_bit(HID_STARTED, &usbhid->iofl))
- return 0;
-
status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 68a560957871..ccf1883318c3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -152,6 +152,25 @@ static void wacom_feature_mapping(struct hid_device *hdev,
hid_data->inputmode = field->report->id;
hid_data->inputmode_index = usage->usage_index;
break;
+
+ case HID_UP_DIGITIZER:
+ if (field->report->id == 0x0B &&
+ (field->application == WACOM_G9_DIGITIZER ||
+ field->application == WACOM_G11_DIGITIZER)) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 0;
+ }
+ break;
+
+ case WACOM_G9_PAGE:
+ case WACOM_G11_PAGE:
+ if (field->report->id == 0x03 &&
+ (field->application == WACOM_G9_TOUCHSCREEN ||
+ field->application == WACOM_G11_TOUCHSCREEN)) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 0;
+ }
+ break;
}
}
@@ -322,26 +341,41 @@ static int wacom_hid_set_device_mode(struct hid_device *hdev)
return 0;
}
-static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
- int length, int mode)
+static int wacom_set_device_mode(struct hid_device *hdev,
+ struct wacom_wac *wacom_wac)
{
- unsigned char *rep_data;
+ u8 *rep_data;
+ struct hid_report *r;
+ struct hid_report_enum *re;
+ int length;
int error = -ENOMEM, limit = 0;
- rep_data = kzalloc(length, GFP_KERNEL);
+ if (wacom_wac->mode_report < 0)
+ return 0;
+
+ re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+ r = re->report_id_hash[wacom_wac->mode_report];
+ if (!r)
+ return -EINVAL;
+
+ rep_data = hid_alloc_report_buf(r, GFP_KERNEL);
if (!rep_data)
- return error;
+ return -ENOMEM;
+
+ length = hid_report_len(r);
do {
- rep_data[0] = report_id;
- rep_data[1] = mode;
+ rep_data[0] = wacom_wac->mode_report;
+ rep_data[1] = wacom_wac->mode_value;
error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
length, 1);
if (error >= 0)
error = wacom_get_report(hdev, HID_FEATURE_REPORT,
rep_data, length, 1);
- } while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
+ } while (error >= 0 &&
+ rep_data[1] != wacom_wac->mode_report &&
+ limit++ < WAC_MSG_RETRIES);
kfree(rep_data);
@@ -411,32 +445,41 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
static int wacom_query_tablet_data(struct hid_device *hdev,
struct wacom_features *features)
{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
if (hdev->bus == BUS_BLUETOOTH)
return wacom_bt_query_tablet_data(hdev, 1, features);
- if (features->type == HID_GENERIC)
- return wacom_hid_set_device_mode(hdev);
-
- if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
- if (features->type > TABLETPC) {
- /* MT Tablet PC touch */
- return wacom_set_device_mode(hdev, 3, 4, 4);
- }
- else if (features->type == WACOM_24HDT) {
- return wacom_set_device_mode(hdev, 18, 3, 2);
- }
- else if (features->type == WACOM_27QHDT) {
- return wacom_set_device_mode(hdev, 131, 3, 2);
- }
- else if (features->type == BAMBOO_PAD) {
- return wacom_set_device_mode(hdev, 2, 2, 2);
- }
- } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
- if (features->type <= BAMBOO_PT) {
- return wacom_set_device_mode(hdev, 2, 2, 2);
+ if (features->type != HID_GENERIC) {
+ if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
+ if (features->type > TABLETPC) {
+ /* MT Tablet PC touch */
+ wacom_wac->mode_report = 3;
+ wacom_wac->mode_value = 4;
+ } else if (features->type == WACOM_24HDT) {
+ wacom_wac->mode_report = 18;
+ wacom_wac->mode_value = 2;
+ } else if (features->type == WACOM_27QHDT) {
+ wacom_wac->mode_report = 131;
+ wacom_wac->mode_value = 2;
+ } else if (features->type == BAMBOO_PAD) {
+ wacom_wac->mode_report = 2;
+ wacom_wac->mode_value = 2;
+ }
+ } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
+ if (features->type <= BAMBOO_PT) {
+ wacom_wac->mode_report = 2;
+ wacom_wac->mode_value = 2;
+ }
}
}
+ wacom_set_device_mode(hdev, wacom_wac);
+
+ if (features->type == HID_GENERIC)
+ return wacom_hid_set_device_mode(hdev);
+
return 0;
}
@@ -1817,6 +1860,9 @@ static int wacom_probe(struct hid_device *hdev,
goto fail_type;
}
+ wacom_wac->hid_data.inputmode = -1;
+ wacom_wac->mode_report = -1;
+
wacom->usbdev = dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index bd198bbd4df0..02c4efea241c 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2426,6 +2426,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
}
/*
+ * Hack for the Bamboo One:
+ * the device presents a PAD/Touch interface as most Bamboos and even
+ * sends ghosts PAD data on it. However, later, we must disable this
+ * ghost interface, and we can not detect it unless we set it here
+ * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
+ */
+ if (features->type == BAMBOO_PEN &&
+ features->pktlen == WACOM_PKGLEN_BBTOUCH3)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
+ /*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
* (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 25baa7f29599..e2084d914c14 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -84,6 +84,12 @@
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_VENDORDEFINED_PEN 0xff0d0001
+#define WACOM_G9_PAGE 0xff090000
+#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
+#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
+#define WACOM_G11_PAGE 0xff110000
+#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
+#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
@@ -238,6 +244,8 @@ struct wacom_wac {
int ps_connected;
u8 bt_features;
u8 bt_high_speed;
+ int mode_report;
+ int mode_value;
struct hid_data hid_data;
};
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 36544c4f653c..303d0c9df907 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
int max1111_read_channel(int channel)
{
+ if (!the_max1111 || !the_max1111->spi)
+ return -ENODEV;
+
return max1111_read(&the_max1111->spi->dev, channel);
}
EXPORT_SYMBOL(max1111_read_channel);
@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
{
struct max1111_data *data = spi_get_drvdata(spi);
+#ifdef CONFIG_SHARPSL_PM
+ the_max1111 = NULL;
+#endif
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663c27c5..ba14a863b451 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -771,11 +771,16 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&clk_freq);
if (ret) {
- dev_err(&pdev->dev, "clock-frequency not specified in DT");
+ dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
goto err;
}
i2c->speed = clk_freq / 1000;
+ if (i2c->speed == 0) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "clock-frequency minimum is 1000\n");
+ goto err;
+ }
jz4780_i2c_set_speed(i2c);
dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0f2f8484e8ec..e584d88ee337 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -525,22 +525,16 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
return 0;
}
-
-/* uevent helps with hotplug: modprobe -q $(MODALIAS) */
static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = to_i2c_client(dev);
int rc;
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
- if (add_uevent_var(env, "MODALIAS=%s%s",
- I2C_MODULE_PREFIX, client->name))
- return -ENOMEM;
- dev_dbg(dev, "uevent\n");
- return 0;
+ return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name);
}
/* i2c bus recovery routines */
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 7748a0a5ddb9..8de073aed001 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -140,22 +140,34 @@ static int i2c_demux_change_master(struct i2c_demux_pinctrl_priv *priv, u32 new_
return i2c_demux_activate_master(priv, new_chan);
}
-static ssize_t cur_master_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t available_masters_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
int count = 0, i;
for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
- count += scnprintf(buf + count, PAGE_SIZE - count, "%c %d - %s\n",
- i == priv->cur_chan ? '*' : ' ', i,
- priv->chan[i].parent_np->full_name);
+ count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%s%c",
+ i, priv->chan[i].parent_np->full_name,
+ i == priv->num_chan - 1 ? '\n' : ' ');
return count;
}
+static DEVICE_ATTR_RO(available_masters);
-static ssize_t cur_master_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t current_master_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", priv->cur_chan);
+}
+
+static ssize_t current_master_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
unsigned int val;
@@ -172,7 +184,7 @@ static ssize_t cur_master_store(struct device *dev, struct device_attribute *att
return ret < 0 ? ret : count;
}
-static DEVICE_ATTR_RW(cur_master);
+static DEVICE_ATTR_RW(current_master);
static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
{
@@ -218,12 +230,18 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
/* switch to first parent as active master */
i2c_demux_activate_master(priv, 0);
- err = device_create_file(&pdev->dev, &dev_attr_cur_master);
+ err = device_create_file(&pdev->dev, &dev_attr_available_masters);
if (err)
goto err_rollback;
+ err = device_create_file(&pdev->dev, &dev_attr_current_master);
+ if (err)
+ goto err_rollback_available;
+
return 0;
+err_rollback_available:
+ device_remove_file(&pdev->dev, &dev_attr_available_masters);
err_rollback:
for (j = 0; j < i; j++) {
of_node_put(priv->chan[j].parent_np);
@@ -238,7 +256,8 @@ static int i2c_demux_pinctrl_remove(struct platform_device *pdev)
struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev);
int i;
- device_remove_file(&pdev->dev, &dev_attr_cur_master);
+ device_remove_file(&pdev->dev, &dev_attr_current_master);
+ device_remove_file(&pdev->dev, &dev_attr_available_masters);
i2c_demux_deactivate_master(priv);
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 9f0a48e39b8a..80e933b296f6 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -451,7 +451,7 @@ err_free:
return ret;
}
-static const struct ide_port_info icside_v6_port_info __initconst = {
+static const struct ide_port_info icside_v6_port_info = {
.init_dma = icside_dma_off_init,
.port_ops = &icside_v6_no_dma_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 8012e43bf8f6..46427ea01753 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -325,6 +325,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
clk_enable(clk);
rate = clk_get_rate(clk);
+ if (!rate)
+ return -EINVAL;
/* NOTE: round *down* to meet minimum timings; we count in clocks */
ideclk_period = 1000000000UL / rate;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index ba947df5a8c7..c6935de425fa 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -660,6 +660,35 @@ static struct cpuidle_state skl_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state skx_cstates[] = {
+ {
+ .name = "C1-SKX",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C1E-SKX",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-SKX",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 600,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state atom_cstates[] = {
{
.name = "C1E-ATM",
@@ -818,8 +847,11 @@ static int cpu_hotplug_notify(struct notifier_block *n,
* driver in this case
*/
dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
- if (!dev->registered)
- intel_idle_cpu_init(hotcpu);
+ if (dev->registered)
+ break;
+
+ if (intel_idle_cpu_init(hotcpu))
+ return NOTIFY_BAD;
break;
}
@@ -904,6 +936,10 @@ static const struct idle_cpu idle_cpu_skl = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_skx = {
+ .state_table = skx_cstates,
+ .disable_promotion_to_c1e = true,
+};
static const struct idle_cpu idle_cpu_avn = {
.state_table = avn_cstates,
@@ -945,6 +981,9 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(0x56, idle_cpu_bdw),
ICPU(0x4e, idle_cpu_skl),
ICPU(0x5e, idle_cpu_skl),
+ ICPU(0x8e, idle_cpu_skl),
+ ICPU(0x9e, idle_cpu_skl),
+ ICPU(0x55, idle_cpu_skx),
ICPU(0x57, idle_cpu_knl),
{}
};
@@ -987,22 +1026,15 @@ static int __init intel_idle_probe(void)
icpu = (const struct idle_cpu *)id->driver_data;
cpuidle_state_table = icpu->state_table;
- if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
- lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
- else
- on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
-
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
- pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
- lapic_timer_reliable_states);
return 0;
}
/*
* intel_idle_cpuidle_devices_uninit()
- * unregister, free cpuidle_devices
+ * Unregisters the cpuidle devices.
*/
static void intel_idle_cpuidle_devices_uninit(void)
{
@@ -1013,9 +1045,6 @@ static void intel_idle_cpuidle_devices_uninit(void)
dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
cpuidle_unregister_device(dev);
}
-
- free_percpu(intel_idle_cpuidle_devices);
- return;
}
/*
@@ -1111,7 +1140,7 @@ static void intel_idle_state_table_update(void)
* intel_idle_cpuidle_driver_init()
* allocate, initialize cpuidle_states
*/
-static int __init intel_idle_cpuidle_driver_init(void)
+static void __init intel_idle_cpuidle_driver_init(void)
{
int cstate;
struct cpuidle_driver *drv = &intel_idle_driver;
@@ -1163,18 +1192,10 @@ static int __init intel_idle_cpuidle_driver_init(void)
drv->state_count += 1;
}
- if (icpu->auto_demotion_disable_flags)
- on_each_cpu(auto_demotion_disable, NULL, 1);
-
if (icpu->byt_auto_demotion_disable_flag) {
wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
}
-
- if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */
- on_each_cpu(c1e_promotion_disable, NULL, 1);
-
- return 0;
}
@@ -1193,7 +1214,6 @@ static int intel_idle_cpu_init(int cpu)
if (cpuidle_register_device(dev)) {
pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
- intel_idle_cpuidle_devices_uninit();
return -EIO;
}
@@ -1218,40 +1238,51 @@ static int __init intel_idle_init(void)
if (retval)
return retval;
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (intel_idle_cpuidle_devices == NULL)
+ return -ENOMEM;
+
intel_idle_cpuidle_driver_init();
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
struct cpuidle_driver *drv = cpuidle_get_driver();
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
drv ? drv->name : "none");
+ free_percpu(intel_idle_cpuidle_devices);
return retval;
}
- intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
- return -ENOMEM;
-
cpu_notifier_register_begin();
for_each_online_cpu(i) {
retval = intel_idle_cpu_init(i);
if (retval) {
+ intel_idle_cpuidle_devices_uninit();
cpu_notifier_register_done();
cpuidle_unregister_driver(&intel_idle_driver);
+ free_percpu(intel_idle_cpuidle_devices);
return retval;
}
}
__register_cpu_notifier(&cpu_hotplug_notifier);
+ if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
+ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+ else
+ on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+
cpu_notifier_register_done();
+ pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
+ lapic_timer_reliable_states);
+
return 0;
}
static void __exit intel_idle_exit(void)
{
- intel_idle_cpuidle_devices_uninit();
- cpuidle_unregister_driver(&intel_idle_driver);
+ struct cpuidle_device *dev;
+ int i;
cpu_notifier_register_begin();
@@ -1259,9 +1290,15 @@ static void __exit intel_idle_exit(void)
on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
__unregister_cpu_notifier(&cpu_hotplug_notifier);
+ for_each_possible_cpu(i) {
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+ cpuidle_unregister_device(dev);
+ }
+
cpu_notifier_register_done();
- return;
+ cpuidle_unregister_driver(&intel_idle_driver);
+ free_percpu(intel_idle_cpuidle_devices);
}
module_init(intel_idle_init);
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index c73331f7782b..2072a31e813b 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
{
int ret;
int axis = chan->scan_index;
- unsigned int raw_val;
+ __le16 raw_val;
mutex_lock(&data->mutex);
ret = bmc150_accel_set_power_state(data, true);
@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
}
ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
- &raw_val, 2);
+ &raw_val, sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
}
- *val = sign_extend32(raw_val >> chan->scan_type.shift,
+ *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
chan->scan_type.realbits - 1);
ret = bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
.realbits = (bits), \
.storagebits = 16, \
.shift = 16 - (bits), \
+ .endianness = IIO_LE, \
}, \
.event_spec = &bmc150_accel_event, \
.num_event_specs = 1 \
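
The two inertial-sensor fixes in this series (bmc150 here, bmg160 further below) stop reading the 16-bit little-endian axis registers into a plain unsigned int, which only decodes correctly on little-endian hosts, and instead declare the raw value __le16 and convert explicitly with le16_to_cpu() before sign extension. A minimal user-space sketch of that conversion, reimplementing the kernel helpers' semantics; the 12-bit resolution, shift and sample bytes are illustrative only:

    /* Endian-safe decode of a little-endian 16-bit sample, then sign
     * extension from the chip's native resolution (12 bits is used here
     * purely as an example).
     */
    #include <stdint.h>
    #include <stdio.h>

    static int32_t sign_extend32(uint32_t value, int index)
    {
    	uint8_t shift = 31 - index;
    	return (int32_t)(value << shift) >> shift;
    }

    static uint16_t le16_to_host(const uint8_t raw[2])
    {
    	return (uint16_t)raw[0] | ((uint16_t)raw[1] << 8);
    }

    int main(void)
    {
    	const uint8_t raw[2] = { 0x10, 0x80 };	/* LSB first, as read from the bus */
    	int shift = 4;				/* storagebits (16) - realbits (12) */
    	int realbits = 12;
    	int32_t val = sign_extend32(le16_to_host(raw) >> shift, realbits - 1);

    	printf("decoded sample: %d\n", val);	/* prints -2047 */
    	return 0;
    }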
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index af4aea7b20f9..82c718c515a0 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -134,6 +134,7 @@ config AT91_ADC
config AT91_SAMA5D2_ADC
tristate "Atmel AT91 SAMA5D2 ADC"
depends on ARCH_AT91 || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say yes here to build support for Atmel SAMA5D2 ADC which is
available on SAMA5D2 SoC family.
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 929508e5266c..998dc3caad4c 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1386,7 +1386,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11644] = {
.bits = 12,
- .int_vref_mv = 2048,
+ .int_vref_mv = 4096,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1396,7 +1396,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11645] = {
.bits = 12,
- .int_vref_mv = 4096,
+ .int_vref_mv = 2048,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1406,7 +1406,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11646] = {
.bits = 10,
- .int_vref_mv = 2048,
+ .int_vref_mv = 4096,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1416,7 +1416,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11647] = {
.bits = 10,
- .int_vref_mv = 4096,
+ .int_vref_mv = 2048,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1680,6 +1680,10 @@ static const struct i2c_device_id max1363_id[] = {
{ "max11615", max11615 },
{ "max11616", max11616 },
{ "max11617", max11617 },
+ { "max11644", max11644 },
+ { "max11645", max11645 },
+ { "max11646", max11646 },
+ { "max11647", max11647 },
{}
};
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index bbce3b09ac45..4dac567e75b4 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
{
int ret;
- unsigned int raw_val;
+ __le16 raw_val;
mutex_lock(&data->mutex);
ret = bmg160_set_power_state(data, true);
@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
}
ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
- 2);
+ sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmg160_set_power_state(data, false);
@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
return ret;
}
- *val = sign_extend32(raw_val, 15);
+ *val = sign_extend32(le16_to_cpu(raw_val), 15);
ret = bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
if (ret < 0)
@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
.sign = 's', \
.realbits = 16, \
.storagebits = 16, \
+ .endianness = IIO_LE, \
}, \
.event_spec = &bmg160_event, \
.num_event_specs = 1 \
@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
mutex_unlock(&data->mutex);
goto err;
}
- data->buffer[i++] = ret;
+ data->buffer[i++] = val;
}
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 09db89359544..90ab8a2d2846 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -238,12 +238,13 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
mutex_lock(&data->lock);
- while (cnt-- || (cnt = max30100_fifo_count(data) > 0)) {
+ while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
ret = max30100_read_measurement(data);
if (ret)
break;
iio_push_to_buffers(data->indio_dev, data->buffer);
+ cnt--;
}
mutex_unlock(&data->lock);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index a7f557af4389..847455a2d6bb 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -9,9 +9,8 @@ config INV_MPU6050_IIO
config INV_MPU6050_I2C
tristate "Invensense MPU6050 devices (I2C)"
- depends on I2C
+ depends on I2C_MUX
select INV_MPU6050_IIO
- select I2C_MUX
select REGMAP_I2C
help
This driver supports the Invensense MPU6050 devices.
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index b976332d45d3..90462fcf5436 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -653,6 +653,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
unsigned int modes;
memset(config, 0, sizeof(*config));
+ config->watermark = ~0;
/*
* If there is just one buffer and we are removing it there is nothing
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index f6a07dc32ae4..a6af56ad10e1 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -769,7 +769,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
mutex_lock(&data->lock);
data->gesture_mode_running = 1;
- while (cnt-- || (cnt = apds9660_fifo_is_empty(data) > 0)) {
+ while (cnt || (cnt = apds9660_fifo_is_empty(data) > 0)) {
ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE,
&data->buffer, 4);
@@ -777,6 +777,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
goto err_read;
iio_push_to_buffers(data->indio_dev, data->buffer);
+ cnt--;
}
err_read:
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 06a4d9c35581..9daca4681922 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
{
}
+#define ST_MAGN_TRIGGER_SET_STATE NULL
#endif /* CONFIG_IIO_BUFFER */
#endif /* ST_MAGN_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 92745d755272..38f917a6c778 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1992,7 +1992,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
/**
* i40iw_get_dst_ipv6
*/
-#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
struct sockaddr_in6 *dst_addr)
{
@@ -2008,7 +2007,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
dst = ip6_route_output(&init_net, NULL, &fl6);
return dst;
}
-#endif
/**
* i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
@@ -2016,7 +2014,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
* @dst_ip: remote ip address
* @arpindex: if there is an arp entry
*/
-#if IS_ENABLED(CONFIG_IPV6)
static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
u32 *src,
u32 *dest,
@@ -2089,7 +2086,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
dst_release(dst);
return rc;
}
-#endif
/**
* i40iw_ipv4_is_loopback - check if loopback
@@ -2190,13 +2186,13 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
cm_info->loc_addr[0],
cm_info->rem_addr[0],
oldarpindex);
-#if IS_ENABLED(CONFIG_IPV6)
- else
+ else if (IS_ENABLED(CONFIG_IPV6))
arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
cm_info->loc_addr,
cm_info->rem_addr,
oldarpindex);
-#endif
+ else
+ arpindex = -EINVAL;
}
if (arpindex < 0) {
i40iw_pr_err("cm_node arpindex\n");
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f16c818ad2e6..b46c25542a7c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -776,15 +776,6 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
unsigned long end);
-int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
- u8 port, struct ifla_vf_info *info);
-int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
- u8 port, int state);
-int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
- u8 port, struct ifla_vf_stats *stats);
-int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
- u64 guid, int type);
-
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
@@ -801,6 +792,15 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
+ u8 port, struct ifla_vf_info *info);
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+ u8 port, int state);
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+ u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+ u64 guid, int type);
+
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int index);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 60b30d338a81..411e4464ca23 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -63,7 +63,6 @@ isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static void isert_release_work(struct work_struct *work);
-static void isert_wait4flush(struct isert_conn *isert_conn);
static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -141,7 +140,7 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.qp_context = isert_conn;
attr.send_cq = comp->cq;
attr.recv_cq = comp->cq;
- attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
+ attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
@@ -887,7 +886,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
break;
case ISER_CONN_UP:
isert_conn_terminate(isert_conn);
- isert_wait4flush(isert_conn);
+ ib_drain_qp(isert_conn->qp);
isert_handle_unbound_conn(isert_conn);
break;
case ISER_CONN_BOUND:
@@ -3213,36 +3212,6 @@ isert_wait4cmds(struct iscsi_conn *conn)
}
}
-static void
-isert_beacon_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct isert_conn *isert_conn = wc->qp->qp_context;
-
- isert_print_wc(wc, "beacon");
-
- isert_info("conn %p completing wait_comp_err\n", isert_conn);
- complete(&isert_conn->wait_comp_err);
-}
-
-static void
-isert_wait4flush(struct isert_conn *isert_conn)
-{
- struct ib_recv_wr *bad_wr;
- static struct ib_cqe cqe = { .done = isert_beacon_done };
-
- isert_info("conn %p\n", isert_conn);
-
- init_completion(&isert_conn->wait_comp_err);
- isert_conn->beacon.wr_cqe = &cqe;
- /* post an indication that all flush errors were consumed */
- if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
- isert_err("conn %p failed to post beacon", isert_conn);
- return;
- }
-
- wait_for_completion(&isert_conn->wait_comp_err);
-}
-
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicitate dataout
@@ -3288,7 +3257,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->mutex);
- isert_wait4flush(isert_conn);
+ ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
@@ -3300,7 +3269,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- isert_wait4flush(isert_conn);
+ ib_drain_qp(isert_conn->qp);
isert_put_conn(isert_conn);
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 192788a4820c..147900cbb578 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -209,14 +209,12 @@ struct isert_conn {
struct ib_qp *qp;
struct isert_device *device;
struct mutex mutex;
- struct completion wait_comp_err;
struct kref kref;
struct list_head fr_pool;
int fr_pool_size;
/* lock to protect fastreg pool */
spinlock_t pool_lock;
struct work_struct release_work;
- struct ib_recv_wr beacon;
bool logout_posted;
bool snd_w_inv;
};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0bd3cb2f3c67..8b42401d4795 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1264,26 +1264,40 @@ free_mem:
*/
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
- struct se_session *se_sess;
struct srpt_send_ioctx *ioctx;
- int tag;
+ unsigned long flags;
BUG_ON(!ch);
- se_sess = ch->sess;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
- if (tag < 0) {
- pr_err("Unable to obtain tag for srpt_send_ioctx\n");
- return NULL;
+ ioctx = NULL;
+ spin_lock_irqsave(&ch->spinlock, flags);
+ if (!list_empty(&ch->free_list)) {
+ ioctx = list_first_entry(&ch->free_list,
+ struct srpt_send_ioctx, free_list);
+ list_del(&ioctx->free_list);
}
- ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag];
- memset(ioctx, 0, sizeof(struct srpt_send_ioctx));
- ioctx->ch = ch;
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ if (!ioctx)
+ return ioctx;
+
+ BUG_ON(ioctx->ch != ch);
spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
+ ioctx->n_rbuf = 0;
+ ioctx->rbufs = NULL;
+ ioctx->n_rdma = 0;
+ ioctx->n_rdma_wrs = 0;
+ ioctx->rdma_wrs = NULL;
+ ioctx->mapped_sg_count = 0;
init_completion(&ioctx->tx_done);
-
- ioctx->cmd.map_tag = tag;
+ ioctx->queue_status_only = false;
+ /*
+ * transport_init_se_cmd() does not initialize all fields, so do it
+ * here.
+ */
+ memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
+ memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
return ioctx;
}
@@ -2021,7 +2035,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
struct ib_cm_rep_param *rep_param;
struct srpt_rdma_ch *ch, *tmp_ch;
u32 it_iu_len;
- int ret = 0;
+ int i, ret = 0;
unsigned char *p;
WARN_ON_ONCE(irqs_disabled());
@@ -2143,6 +2157,12 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (!ch->ioctx_ring)
goto free_ch;
+ INIT_LIST_HEAD(&ch->free_list);
+ for (i = 0; i < ch->rq_size; i++) {
+ ch->ioctx_ring[i]->ch = ch;
+ list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
+ }
+
ret = srpt_create_ch_ib(ch);
if (ret) {
rej->reason = cpu_to_be32(
@@ -2173,8 +2193,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
p = &ch->sess_name[0];
try_again:
- ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size,
- sizeof(struct srpt_send_ioctx),
+ ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
TARGET_PROT_NORMAL, p, ch, NULL);
if (IS_ERR(ch->sess)) {
pr_info("Rejected login because no ACL has been"
@@ -2881,7 +2900,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx = container_of(se_cmd,
struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
- struct se_session *se_sess = ch->sess;
+ unsigned long flags;
WARN_ON(ioctx->state != SRPT_STATE_DONE);
WARN_ON(ioctx->mapped_sg_count != 0);
@@ -2892,7 +2911,9 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
ioctx->n_rbuf = 0;
}
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ spin_lock_irqsave(&ch->spinlock, flags);
+ list_add(&ioctx->free_list, &ch->free_list);
+ spin_unlock_irqrestore(&ch->spinlock, flags);
}
/**
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index ca288f019315..af9b8b527340 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -179,6 +179,7 @@ struct srpt_recv_ioctx {
* struct srpt_send_ioctx - SRPT send I/O context.
* @ioctx: See above.
* @ch: Channel pointer.
+ * @free_list: Node in srpt_rdma_ch.free_list.
* @n_rbuf: Number of data buffers in the received SRP command.
* @rbufs: Pointer to SRP data buffer array.
* @single_rbuf: SRP data buffer if the command has only a single buffer.
@@ -201,6 +202,7 @@ struct srpt_send_ioctx {
struct srp_direct_buf *rbufs;
struct srp_direct_buf single_rbuf;
struct scatterlist *sg;
+ struct list_head free_list;
spinlock_t spinlock;
enum srpt_command_state state;
struct se_cmd cmd;
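
The ib_srpt change replaces the percpu_ida tag allocator with a per-channel free list of pre-allocated send contexts, handed out and returned under the channel spinlock. A small user-space sketch of that pool-under-a-lock pattern; the names, ring size and per-use reset are illustrative, and the real driver additionally re-zeroes the SCSI command and sense data as shown in the hunk above:

    /* Fixed pool of contexts kept on a free list and recycled under a lock. */
    #include <pthread.h>
    #include <stdio.h>

    struct ioctx {
    	struct ioctx *next;		/* free-list linkage */
    	int state;			/* stand-in for per-command state */
    };

    #define RING_SIZE 4

    static struct ioctx ring[RING_SIZE];
    static struct ioctx *free_list;
    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

    static void pool_init(void)
    {
    	for (int i = 0; i < RING_SIZE; i++) {
    		ring[i].next = free_list;
    		free_list = &ring[i];
    	}
    }

    static struct ioctx *ioctx_get(void)
    {
    	struct ioctx *c;

    	pthread_mutex_lock(&pool_lock);
    	c = free_list;
    	if (c)
    		free_list = c->next;
    	pthread_mutex_unlock(&pool_lock);

    	if (c)
    		c->state = 0;		/* reset per-use fields before reuse */
    	return c;			/* NULL means the ring is exhausted */
    }

    static void ioctx_put(struct ioctx *c)
    {
    	pthread_mutex_lock(&pool_lock);
    	c->next = free_list;
    	free_list = c;
    	pthread_mutex_unlock(&pool_lock);
    }

    int main(void)
    {
    	pool_init();
    	struct ioctx *c = ioctx_get();
    	printf("got %p\n", (void *)c);
    	ioctx_put(c);
    	return 0;
    }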
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 72d6182666cb..58f2fe687a24 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int s_length = sg_dma_len(s);
unsigned int s_dma_len = s->length;
- s->offset = s_offset;
+ s->offset += s_offset;
s->length = s_length;
sg_dma_address(s) = dma_addr + s_offset;
dma_addr += s_dma_len;
@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
for_each_sg(sg, s, nents, i) {
if (sg_dma_address(s) != DMA_ERROR_CODE)
- s->offset = sg_dma_address(s);
+ s->offset += sg_dma_address(s);
if (sg_dma_len(s))
s->length = sg_dma_len(s);
sg_dma_address(s) = DMA_ERROR_CODE;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a2e1b7f14df2..e1852e845d21 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2458,7 +2458,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
}
/* register PCI DMA alias device */
- if (req_id != dma_alias && dev_is_pci(dev)) {
+ if (dev_is_pci(dev) && req_id != dma_alias) {
tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
dma_alias & 0xff, NULL, domain);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index bfd4f7c3b1d8..b9df1411c894 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (!group->default_domain) {
group->default_domain = __iommu_domain_alloc(dev->bus,
IOMMU_DOMAIN_DMA);
- group->domain = group->default_domain;
+ if (!group->domain)
+ group->domain = group->default_domain;
}
ret = iommu_group_add_device(group, dev);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index a6f593a0a29e..5710a06c3049 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -315,8 +315,8 @@ static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
int i;
for (i = 0; i < iommu->num_mmu; i++)
- active &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
- RK_MMU_STATUS_STALL_ACTIVE;
+ active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+ RK_MMU_STATUS_STALL_ACTIVE);
return active;
}
@@ -327,8 +327,8 @@ static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
int i;
for (i = 0; i < iommu->num_mmu; i++)
- enable &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
- RK_MMU_STATUS_PAGING_ENABLED;
+ enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+ RK_MMU_STATUS_PAGING_ENABLED);
return enable;
}
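
The rockchip-iommu hunks fix a boolean accumulation bug: ANDing a bool accumulator directly with a masked register value drops the flag whenever the flag sits above bit 0, because the accumulator only ever holds 0 or 1. A runnable illustration; the flag value 0x4 is an arbitrary example of such a bit:

    /* Why "active &= reg & FLAG" loses a flag that is not bit 0, and why
     * the "!!" normalization in the fix restores the intended behaviour.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG 0x4u			/* example status bit (bit 2) */

    int main(void)
    {
    	unsigned int reg = FLAG;	/* the hardware reports the flag as set */
    	bool buggy = true, fixed = true;

    	buggy &= reg & FLAG;		/* 1 & 0x4 == 0: flag wrongly reported clear */
    	fixed &= !!(reg & FLAG);	/* !!(0x4) == 1: flag correctly reported set */

    	printf("buggy=%d fixed=%d\n", buggy, fixed);	/* buggy=0 fixed=1 */
    	return 0;
    }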
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 7fdf78f46433..df7e05ca8f9c 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -215,9 +215,11 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
if (count == 0)
count = 32;
isac_empty_fifo(cs, count);
- if ((count = cs->rcvidx) > 0) {
+ count = cs->rcvidx;
+ if (count > 0) {
cs->rcvidx = 0;
- if (!(skb = alloc_skb(count, GFP_ATOMIC)))
+ skb = alloc_skb(count, GFP_ATOMIC);
+ if (!skb)
printk(KERN_WARNING "HiSax: D receive out of memory\n");
else {
memcpy(skb_put(skb, count), cs->rcvbuf, count);
@@ -251,7 +253,8 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
cs->tx_skb = NULL;
}
}
- if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
+ cs->tx_skb = skb_dequeue(&cs->sq);
+ if (cs->tx_skb) {
cs->tx_cnt = 0;
isac_fill_fifo(cs);
} else
@@ -313,7 +316,8 @@ afterXPR:
#if ARCOFI_USE
if (v1 & 0x08) {
if (!cs->dc.isac.mon_rx) {
- if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
+ cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
+ if (!cs->dc.isac.mon_rx) {
if (cs->debug & L1_DEB_WARN)
debugl1(cs, "ISAC MON RX out of memory!");
cs->dc.isac.mocr &= 0xf0;
@@ -343,7 +347,8 @@ afterXPR:
afterMONR0:
if (v1 & 0x80) {
if (!cs->dc.isac.mon_rx) {
- if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
+ cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
+ if (!cs->dc.isac.mon_rx) {
if (cs->debug & L1_DEB_WARN)
debugl1(cs, "ISAC MON RX out of memory!");
cs->dc.isac.mocr &= 0x0f;
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index eb934b0242e0..67392b6ab845 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -331,7 +331,7 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
* Actually now I think of it, it's possible that Ron *is* half the Plan 9
* userbase. Oh well.
*/
-static bool could_be_syscall(unsigned int num)
+bool could_be_syscall(unsigned int num)
{
/* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
@@ -416,6 +416,10 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
*
* This routine indicates if a particular trap number could be delivered
* directly.
+ *
+ * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
+ * trap gate for syscalls, so this trick is ineffective. See Mastery for
+ * how we could do this anyway...
*/
static bool direct_trap(unsigned int num)
{
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index ac8ad0461e80..69b3814afd2f 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -167,6 +167,7 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
bool send_notify_to_eventfd(struct lg_cpu *cpu);
void init_clockdev(struct lg_cpu *cpu);
bool check_syscall_vector(struct lguest *lg);
+bool could_be_syscall(unsigned int num);
int init_interrupts(void);
void free_interrupts(void);
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6a4cd771a2be..adc162c7040d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -429,8 +429,12 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
return;
break;
case 32 ... 255:
+ /* This might be a syscall. */
+ if (could_be_syscall(cpu->regs->trapnum))
+ break;
+
/*
- * These values mean a real interrupt occurred, in which case
+ * Other values mean a real interrupt occurred, in which case
* the Host handler has already been run. We just do a
* friendly check if another process should now be run, then
* return to run the Guest again.
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index dc11bbf27274..58d04726cdd7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -46,7 +46,6 @@ static ssize_t mbox_test_signal_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
- int ret;
if (!tdev->tx_channel) {
dev_err(tdev->dev, "Channel cannot do Tx\n");
@@ -60,17 +59,20 @@ static ssize_t mbox_test_signal_write(struct file *filp,
return -EINVAL;
}
- tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
- if (!tdev->signal)
- return -ENOMEM;
+ /* Only allocate memory if we need to */
+ if (!tdev->signal) {
+ tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
+ if (!tdev->signal)
+ return -ENOMEM;
+ }
- ret = copy_from_user(tdev->signal, userbuf, count);
- if (ret) {
+ if (copy_from_user(tdev->signal, userbuf, count)) {
kfree(tdev->signal);
+ tdev->signal = NULL;
return -EFAULT;
}
- return ret < 0 ? ret : count;
+ return count;
}
static const struct file_operations mbox_test_signal_ops = {
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index bd07f39f0692..dd2afbca51c9 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -189,8 +189,8 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
int i;
ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ if (!ctx)
+ return -ENOMEM;
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6a4811f85705..4a36632c236f 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -375,13 +375,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
if (!np) {
dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
if (!of_get_property(np, "mbox-names", NULL)) {
dev_err(cl->dev,
"%s() requires an \"mbox-names\" property\n", __func__);
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 0ddf638d60f3..043828d541f7 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -361,8 +361,6 @@ static int __init acpi_pcc_probe(void)
struct acpi_generic_address *db_reg;
struct acpi_pcct_hw_reduced *pcct_ss;
pcc_mbox_channels[i].con_priv = pcct_entry;
- pcct_entry = (struct acpi_subtable_header *)
- ((unsigned long) pcct_entry + pcct_entry->length);
/* If doorbell is in system memory cache the virt address */
pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
@@ -370,6 +368,8 @@ static int __init acpi_pcc_probe(void)
if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
db_reg->bit_width/8);
+ pcct_entry = (struct acpi_subtable_header *)
+ ((unsigned long) pcct_entry + pcct_entry->length);
}
pcc_mbox_ctrl.num_chans = count;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7df6b4f1548a..3fe86b54d50b 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -322,7 +322,7 @@ __clear_page_buffers(struct page *page)
{
ClearPagePrivate(page);
set_page_private(page, 0);
- page_cache_release(page);
+ put_page(page);
}
static void free_buffers(struct page *page)
{
@@ -1673,6 +1673,9 @@ static void bitmap_free(struct bitmap *bitmap)
if (!bitmap) /* there was no bitmap */
return;
+ if (bitmap->sysfs_can_clear)
+ sysfs_put(bitmap->sysfs_can_clear);
+
if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
md_cluster_stop(bitmap->mddev);
@@ -1712,15 +1715,13 @@ void bitmap_destroy(struct mddev *mddev)
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- if (bitmap->sysfs_can_clear)
- sysfs_put(bitmap->sysfs_can_clear);
-
bitmap_free(bitmap);
}
/*
* initialize the bitmap structure
* if this returns an error, bitmap_destroy must be called to do clean up
+ * once mddev->bitmap is set
*/
struct bitmap *bitmap_create(struct mddev *mddev, int slot)
{
@@ -1865,8 +1866,10 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
struct bitmap_counts *counts;
struct bitmap *bitmap = bitmap_create(mddev, slot);
- if (IS_ERR(bitmap))
+ if (IS_ERR(bitmap)) {
+ bitmap_free(bitmap);
return PTR_ERR(bitmap);
+ }
rv = bitmap_init_from_disk(bitmap, 0);
if (rv)
@@ -2170,14 +2173,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
else {
mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
- if (rv) {
- bitmap_destroy(mddev);
+ if (rv)
mddev->bitmap_info.offset = 0;
- }
}
mddev->pers->quiesce(mddev, 0);
- if (rv)
+ if (rv) {
+ bitmap_destroy(mddev);
return rv;
+ }
}
}
}
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 27f2ef300f8b..3970cda10080 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
-#define WRITE_LOCK(cmd) \
- down_write(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_write(&cmd->root_lock); \
- return -EINVAL; \
+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
+{
+ down_write(&cmd->root_lock);
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+ up_write(&cmd->root_lock);
+ return false;
}
+ return true;
+}
-#define WRITE_LOCK_VOID(cmd) \
- down_write(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_write(&cmd->root_lock); \
- return; \
- }
+#define WRITE_LOCK(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return -EINVAL; \
+ } while(0)
+
+#define WRITE_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return; \
+ } while(0)
#define WRITE_UNLOCK(cmd) \
- up_write(&cmd->root_lock)
+ up_write(&(cmd)->root_lock)
-#define READ_LOCK(cmd) \
- down_read(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_read(&cmd->root_lock); \
- return -EINVAL; \
+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+{
+ down_read(&cmd->root_lock);
+ if (cmd->fail_io) {
+ up_read(&cmd->root_lock);
+ return false;
}
+ return true;
+}
-#define READ_LOCK_VOID(cmd) \
- down_read(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_read(&cmd->root_lock); \
- return; \
- }
+#define READ_LOCK(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return -EINVAL; \
+ } while(0)
+
+#define READ_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return; \
+ } while(0)
#define READ_UNLOCK(cmd) \
- up_read(&cmd->root_lock)
+ up_read(&(cmd)->root_lock)
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
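
The dm-cache-metadata cleanup moves the lock-or-bail logic into static helpers and wraps the remaining macros in do { } while (0). Besides no longer hiding a return statement inside a macro, this makes each macro expand to exactly one statement, so it behaves correctly under an unbraced if. A minimal sketch of that hazard; the macro names and printouts are illustrative:

    /* A multi-statement macro versus the do { } while (0) form. */
    #include <stdio.h>

    /* Expands to two statements: an unbraced "if" only guards the first. */
    #define LOCK_BROKEN(x) \
    	printf("lock %d\n", (x)); \
    	printf("check %d\n", (x))

    /* Expands to a single statement, so it nests safely. */
    #define LOCK_OK(x) \
    	do { \
    		printf("lock %d\n", (x)); \
    		printf("check %d\n", (x)); \
    	} while (0)

    int main(void)
    {
    	if (0)
    		LOCK_BROKEN(1);		/* "check 1" still prints */

    	if (0)
    		LOCK_OK(2);		/* nothing prints, as intended */

    	return 0;
    }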
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index be4905769a45..3d3ac13287a4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1662,8 +1662,10 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
tio = alloc_tio(ci, ti, target_bio_nr);
tio->len_ptr = len;
r = clone_bio(tio, bio, sector, *len);
- if (r < 0)
+ if (r < 0) {
+ free_tio(ci->md, tio);
break;
+ }
__map_bio(tio);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c068f171b4eb..194580fba7fd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -718,6 +718,7 @@ static void super_written(struct bio *bio)
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
+ rdev_dec_pending(rdev, mddev);
bio_put(bio);
}
@@ -732,6 +733,8 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
*/
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+ atomic_inc(&rdev->nr_pending);
+
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
@@ -6883,7 +6886,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
case ADD_NEW_DISK:
/* We can support ADD_NEW_DISK on read-only arrays
- * on if we are re-adding a preexisting device.
+ * only if we are re-adding a preexisting device.
* So require mddev->pers and MD_DISK_SYNC.
*/
if (mddev->pers) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 39fb21e048e6..a7f2b9c9f8a0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (best_dist_disk < 0) {
if (is_badblock(rdev, this_sector, sectors,
&first_bad, &bad_sectors)) {
- if (first_bad < this_sector)
+ if (first_bad <= this_sector)
/* Cannot use this */
continue;
best_good_sectors = first_bad - this_sector;
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index ca861aea68a5..6b469e8c4c6e 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -228,10 +228,6 @@ void au0828_card_analog_fe_setup(struct au0828_dev *dev)
"au8522", 0x8e >> 1, NULL);
if (sd == NULL)
pr_err("analog subdev registration failed\n");
-#ifdef CONFIG_MEDIA_CONTROLLER
- if (sd)
- dev->decoder = &sd->entity;
-#endif
}
/* Setup tuners */
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 5dc82e8c8670..cc22b32776ad 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -137,8 +137,14 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
#ifdef CONFIG_MEDIA_CONTROLLER
if (dev->media_dev &&
media_devnode_is_registered(&dev->media_dev->devnode)) {
+ /* clear enable_source, disable_source */
+ dev->media_dev->source_priv = NULL;
+ dev->media_dev->enable_source = NULL;
+ dev->media_dev->disable_source = NULL;
+
media_device_unregister(dev->media_dev);
media_device_cleanup(dev->media_dev);
+ kfree(dev->media_dev);
dev->media_dev = NULL;
}
#endif
@@ -166,7 +172,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
Set the status so poll routines can check and avoid
access after disconnect.
*/
- dev->dev_state = DEV_DISCONNECTED;
+ set_bit(DEV_DISCONNECTED, &dev->dev_state);
au0828_rc_unregister(dev);
/* Digital TV */
@@ -192,7 +198,7 @@ static int au0828_media_device_init(struct au0828_dev *dev,
#ifdef CONFIG_MEDIA_CONTROLLER
struct media_device *mdev;
- mdev = media_device_get_devres(&udev->dev);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
@@ -456,7 +462,8 @@ static int au0828_media_device_register(struct au0828_dev *dev,
{
#ifdef CONFIG_MEDIA_CONTROLLER
int ret;
- struct media_entity *entity, *demod = NULL, *tuner = NULL;
+ struct media_entity *entity, *demod = NULL;
+ struct media_link *link;
if (!dev->media_dev)
return 0;
@@ -482,26 +489,37 @@ static int au0828_media_device_register(struct au0828_dev *dev,
}
/*
- * Find tuner and demod to disable the link between
- * the two to avoid disable step when tuner is requested
- * by video or audio. Note that this step can't be done
- * until dvb graph is created during dvb register.
+ * Find tuner, decoder and demod.
+ *
+ * The tuner and decoder should be cached, as they'll be used by
+ * au0828_enable_source.
+ *
+ * It also needs to disable the link between tuner and
+ * decoder/demod, to avoid the disable step when the tuner is requested
+ * by video or audio. Note that this step can't be done until dvb
+ * graph is created during dvb register.
*/
media_device_for_each_entity(entity, dev->media_dev) {
- if (entity->function == MEDIA_ENT_F_DTV_DEMOD)
+ switch (entity->function) {
+ case MEDIA_ENT_F_TUNER:
+ dev->tuner = entity;
+ break;
+ case MEDIA_ENT_F_ATV_DECODER:
+ dev->decoder = entity;
+ break;
+ case MEDIA_ENT_F_DTV_DEMOD:
demod = entity;
- else if (entity->function == MEDIA_ENT_F_TUNER)
- tuner = entity;
+ break;
+ }
}
- /* Disable link between tuner and demod */
- if (tuner && demod) {
- struct media_link *link;
- list_for_each_entry(link, &demod->links, list) {
- if (link->sink->entity == demod &&
- link->source->entity == tuner) {
+ /* Disable link between tuner->demod and/or tuner->decoder */
+ if (dev->tuner) {
+ list_for_each_entry(link, &dev->tuner->links, list) {
+ if (demod && link->sink->entity == demod)
+ media_entity_setup_link(link, 0);
+ if (dev->decoder && link->sink->entity == dev->decoder)
media_entity_setup_link(link, 0);
- }
}
}
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index b0f067971979..3d6687f0407d 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
bool first = true;
/* do nothing if device is disconnected */
- if (ir->dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
return 0;
/* Check IR int */
@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
/* do nothing if device is disconnected */
- if (ir->dev->dev_state != DEV_DISCONNECTED) {
+ if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
/* Disable IR */
au8522_rc_clear(ir, 0xe0, 1 << 4);
}
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 13f6dab9ccc2..32d7db96479c 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -106,14 +106,13 @@ static inline void print_err_status(struct au0828_dev *dev,
static int check_dev(struct au0828_dev *dev)
{
- if (dev->dev_state & DEV_DISCONNECTED) {
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
pr_info("v4l2 ioctl: device not present\n");
return -ENODEV;
}
- if (dev->dev_state & DEV_MISCONFIGURED) {
- pr_info("v4l2 ioctl: device is misconfigured; "
- "close and open it again\n");
+ if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
+ pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
return -EIO;
}
return 0;
@@ -521,8 +520,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
if (!dev)
return 0;
- if ((dev->dev_state & DEV_DISCONNECTED) ||
- (dev->dev_state & DEV_MISCONFIGURED))
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
+ test_bit(DEV_MISCONFIGURED, &dev->dev_state))
return 0;
if (urb->status < 0) {
@@ -824,10 +823,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
int ret = 0;
dev->stream_state = STREAM_INTERRUPT;
- if (dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
return -ENODEV;
else if (ret) {
- dev->dev_state = DEV_MISCONFIGURED;
+ set_bit(DEV_MISCONFIGURED, &dev->dev_state);
dprintk(1, "%s device is misconfigured!\n", __func__);
return ret;
}
@@ -1026,7 +1025,7 @@ static int au0828_v4l2_open(struct file *filp)
int ret;
dprintk(1,
- "%s called std_set %d dev_state %d stream users %d users %d\n",
+ "%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@@ -1045,7 +1044,7 @@ static int au0828_v4l2_open(struct file *filp)
au0828_analog_stream_enable(dev);
au0828_analog_stream_reset(dev);
dev->stream_state = STREAM_OFF;
- dev->dev_state |= DEV_INITIALIZED;
+ set_bit(DEV_INITIALIZED, &dev->dev_state);
}
dev->users++;
mutex_unlock(&dev->lock);
@@ -1059,7 +1058,7 @@ static int au0828_v4l2_close(struct file *filp)
struct video_device *vdev = video_devdata(filp);
dprintk(1,
- "%s called std_set %d dev_state %d stream users %d users %d\n",
+ "%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@@ -1075,7 +1074,7 @@ static int au0828_v4l2_close(struct file *filp)
del_timer_sync(&dev->vbi_timeout);
}
- if (dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
goto end;
if (dev->users == 1) {
@@ -1135,7 +1134,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
.type = V4L2_TUNER_ANALOG_TV,
};
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (dev->std_set_in_tuner_core)
@@ -1207,7 +1206,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1250,7 +1249,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
f->fmt.pix.width = dev->width;
@@ -1269,7 +1268,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1281,7 +1280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct au0828_dev *dev = video_drvdata(file);
int rc;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
rc = check_dev(dev);
@@ -1303,7 +1302,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (norm == dev->std)
@@ -1335,7 +1334,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*norm = dev->std;
@@ -1357,7 +1356,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
[AU0828_VMUX_DVB] = "DVB",
};
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
tmp = input->index;
@@ -1387,7 +1386,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*i = dev->ctrl_input;
@@ -1398,7 +1397,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
{
int i;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
switch (AUVI_INPUT(index).type) {
@@ -1496,7 +1495,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
a->index = dev->ctrl_ainput;
@@ -1516,7 +1515,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
if (a->index != dev->ctrl_ainput)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return 0;
}
@@ -1534,7 +1533,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
if (ret)
return ret;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strcpy(t->name, "Auvitek tuner");
@@ -1554,7 +1553,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
if (t->index != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@@ -1576,7 +1575,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
freq->frequency = dev->ctrl_freq;
return 0;
@@ -1591,7 +1590,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@@ -1617,7 +1616,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1643,7 +1642,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
cc->bounds.left = 0;
@@ -1665,7 +1664,7 @@ static int vidioc_g_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
reg->val = au0828_read(dev, reg->reg);
@@ -1678,7 +1677,7 @@ static int vidioc_s_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_writereg(dev, reg->reg, reg->val);
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index ff7f8510fb77..87f32846f1c0 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -121,9 +122,9 @@ enum au0828_stream_state {
/* device state */
enum au0828_dev_state {
- DEV_INITIALIZED = 0x01,
- DEV_DISCONNECTED = 0x02,
- DEV_MISCONFIGURED = 0x04
+ DEV_INITIALIZED = 0,
+ DEV_DISCONNECTED = 1,
+ DEV_MISCONFIGURED = 2
};
struct au0828_dev;
@@ -247,7 +248,7 @@ struct au0828_dev {
int input_type;
int std_set_in_tuner_core;
unsigned int ctrl_input;
- enum au0828_dev_state dev_state;
+ unsigned long dev_state; /* defined at enum au0828_dev_state */
enum au0828_stream_state stream_state;
wait_queue_head_t open;
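The au0828.h hunk above switches the dev_state flags from bitmask values (0x01/0x02/0x04) to bit numbers stored in an unsigned long, which is what the newly included <linux/bitops.h> helpers expect (and why the dprintk format specifiers earlier in this patch become %ld). A minimal sketch of the pattern this enables; the helper function itself is hypothetical and not part of the patch:

/* Hypothetical helper, shown only to illustrate the set_bit()/test_bit()
 * usage that the new bit-number encoding of dev_state allows. */
static void example_mark_disconnected(struct au0828_dev *dev)
{
	/* atomic read-modify-write on dev->dev_state (an unsigned long) */
	set_bit(DEV_DISCONNECTED, &dev->dev_state);

	if (test_bit(DEV_INITIALIZED, &dev->dev_state))
		clear_bit(DEV_INITIALIZED, &dev->dev_state);
}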
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 2a7b79bc90fd..2228cd3a846e 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -34,7 +34,7 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
{
struct media_entity *entity;
struct media_entity *if_vid = NULL, *if_aud = NULL;
- struct media_entity *tuner = NULL, *decoder = NULL, *dtv_demod = NULL;
+ struct media_entity *tuner = NULL, *decoder = NULL;
struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL;
bool is_webcam = false;
u32 flags;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index df4c052c6bd6..f300f060b3f3 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -349,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
if (dma->pages) {
for (i = 0; i < dma->nr_pages; i++)
- page_cache_release(dma->pages[i]);
+ put_page(dma->pages[i]);
kfree(dma->pages);
dma->pages = NULL;
}
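This and the later PAGE_CACHE_* / page_cache_release() conversions in this merge track the 4.6 removal of the page-cache aliases. As far as I recall, the old names were plain aliases in <linux/pagemap.h>, roughly:

/* Approximate pre-4.6 definitions, shown for context only: */
#define PAGE_CACHE_SHIFT		PAGE_SHIFT
#define PAGE_CACHE_SIZE			PAGE_SIZE
#define page_cache_release(page)	put_page(page)

so the substitutions in these hunks are mechanical and should not change behaviour.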
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e8b933111e0d..9c677f3f3c26 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
{
struct inode *root;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = IBMASMFS_MAGIC;
sb->s_op = &ibmasmfs_s_ops;
sb->s_time_gran = 1;
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 5f1a36b8fbb0..0a5cbbe12452 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -458,8 +458,10 @@ static void lkdtm_do_action(enum ctype which)
break;
val = kmalloc(len, GFP_KERNEL);
- if (!val)
+ if (!val) {
+ kfree(base);
break;
+ }
*val = 0x12345678;
base[offset] = *val;
@@ -498,14 +500,17 @@ static void lkdtm_do_action(enum ctype which)
}
case CT_READ_BUDDY_AFTER_FREE: {
unsigned long p = __get_free_page(GFP_KERNEL);
- int saw, *val = kmalloc(1024, GFP_KERNEL);
+ int saw, *val;
int *base;
if (!p)
break;
- if (!val)
+ val = kmalloc(1024, GFP_KERNEL);
+ if (!val) {
+ free_page(p);
break;
+ }
base = (int *)p;
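Both lkdtm hunks apply the same rule: when a second allocation fails, release whatever the earlier allocation handed out before breaking out of the test case. A generic sketch of the idiom, with hypothetical names:

/* Sketch of the release-on-partial-failure idiom the lkdtm fixes apply;
 * example_alloc_pair() is hypothetical. */
static int example_alloc_pair(void **a, void **b)
{
	*a = kmalloc(64, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;

	*b = kmalloc(64, GFP_KERNEL);
	if (!*b) {
		kfree(*a);	/* undo the first allocation before bailing out */
		*a = NULL;
		return -ENOMEM;
	}
	return 0;
}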
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4e4561..f84a4275ca29 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages,
if (dirty)
set_page_dirty(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
pages[i] = NULL;
}
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 3bdbe50a363f..8a0147dfed27 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -86,7 +86,6 @@ static int max_devices;
/* TODO: Replace these with struct ida */
static DECLARE_BITMAP(dev_use, MAX_DEVICES);
-static DECLARE_BITMAP(name_use, MAX_DEVICES);
/*
* There is one mmc_blk_data per slot.
@@ -105,7 +104,6 @@ struct mmc_blk_data {
unsigned int usage;
unsigned int read_only;
unsigned int part_type;
- unsigned int name_idx;
unsigned int reset_done;
#define MMC_BLK_READ BIT(0)
#define MMC_BLK_WRITE BIT(1)
@@ -2202,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
goto out;
}
- /*
- * !subname implies we are creating main mmc_blk_data that will be
- * associated with mmc_card with dev_set_drvdata. Due to device
- * partitions, devidx will not coincide with a per-physical card
- * index anymore so we keep track of a name index.
- */
- if (!subname) {
- md->name_idx = find_first_zero_bit(name_use, max_devices);
- __set_bit(md->name_idx, name_use);
- } else
- md->name_idx = ((struct mmc_blk_data *)
- dev_to_disk(parent)->private_data)->name_idx;
-
md->area_type = area_type;
/*
@@ -2264,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
- "mmcblk%u%s", md->name_idx, subname ? subname : "");
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
if (mmc_card_mmc(card))
blk_queue_logical_block_size(md->queue.queue,
@@ -2418,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
- __clear_bit(md->name_idx, name_use);
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1d94607611d8..6e4c55a4aab5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -356,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
* They have to set these according to their abilities.
*/
host->max_segs = 1;
- host->max_seg_size = PAGE_CACHE_SIZE;
+ host->max_seg_size = PAGE_SIZE;
- host->max_req_size = PAGE_CACHE_SIZE;
+ host->max_req_size = PAGE_SIZE;
host->max_blk_size = 512;
- host->max_blk_count = PAGE_CACHE_SIZE / 512;
+ host->max_blk_count = PAGE_SIZE / 512;
return host;
}
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 62aa5d0efcee..79e19017343e 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->cd_idx = 0;
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
slot->host->mmc_host_ops.get_cd = bxt_get_cd;
@@ -1173,6 +1174,30 @@ static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_APL_EMMC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d1a0b4db60db..89e7151684a1 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -28,6 +28,9 @@
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
+#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca
+#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc
+#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0
#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index aca439d3ca83..30132500aa1c 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -309,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
__func__, uhs, ctrl_2);
}
+static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+ u8 pwr = host->pwr;
+
+ sdhci_set_power(host, mode, vdd);
+
+ if (host->pwr == pwr)
+ return;
+
+ if (host->pwr == 0)
+ vdd = 0;
+
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ spin_unlock_irq(&host->lock);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ spin_lock_irq(&host->lock);
+ }
+}
+
static const struct sdhci_ops pxav3_sdhci_ops = {
.set_clock = sdhci_set_clock,
+ .set_power = pxav3_set_power,
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f8c4762bb48d..bcc0de47fe7e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -382,14 +382,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
};
-static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
- .pdata = &sdhci_tegra114_pdata,
- .nvquirks = NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_DDR50 |
- NVQUIRK_ENABLE_SDR104 |
- NVQUIRK_HAS_PADCALIB,
-};
-
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -407,7 +399,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
static const struct of_device_id sdhci_tegra_dt_match[] = {
{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
- { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
+ { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8670f162dec7..6bd3d1794966 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1210,10 +1210,24 @@ clock_set:
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
-static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
+
+ spin_unlock_irq(&host->lock);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ spin_lock_irq(&host->lock);
+
+ if (mode != MMC_POWER_OFF)
+ sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+ else
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+}
+
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
u8 pwr = 0;
if (mode != MMC_POWER_OFF) {
@@ -1245,7 +1259,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_off(host);
- vdd = 0;
} else {
/*
* Spec says that we should clear the power reg before setting
@@ -1276,12 +1289,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
mdelay(10);
}
+}
+EXPORT_SYMBOL_GPL(sdhci_set_power);
- if (!IS_ERR(mmc->supply.vmmc)) {
- spin_unlock_irq(&host->lock);
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
- spin_lock_irq(&host->lock);
- }
+static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (host->ops->set_power)
+ host->ops->set_power(host, mode, vdd);
+ else if (!IS_ERR(mmc->supply.vmmc))
+ sdhci_set_power_reg(host, mode, vdd);
+ else
+ sdhci_set_power(host, mode, vdd);
}
/*****************************************************************************\
@@ -1431,7 +1452,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
}
}
- sdhci_set_power(host, ios->power_mode, ios->vdd);
+ __sdhci_set_power(host, ios->power_mode, ios->vdd);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 3bd28033dbd9..0f39f4f84d10 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -529,6 +529,8 @@ struct sdhci_ops {
#endif
void (*set_clock)(struct sdhci_host *host, unsigned int clock);
+ void (*set_power)(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
int (*enable_dma)(struct sdhci_host *host);
unsigned int (*get_max_clock)(struct sdhci_host *host);
@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
}
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
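Taken together, the sdhci changes export the generic power handling and dispatch through a new ->set_power hook (used by sdhci-pxav3 above): __sdhci_set_power() prefers the host op, falls back to sdhci_set_power_reg() when a vmmc regulator is present, and otherwise calls sdhci_set_power() directly. A stripped-down sketch of how another glue driver might wire the hook; the "foo" names are placeholders, not from this patch:

/* Placeholder driver, illustrating the new .set_power callback only. */
static void foo_set_power(struct sdhci_host *host, unsigned char mode,
			  unsigned short vdd)
{
	sdhci_set_power(host, mode, vdd);	/* generic SDHCI_POWER_CONTROL programming */

	/* controller-specific regulator/GPIO handling would go here */
}

static const struct sdhci_ops foo_sdhci_ops = {
	.set_clock	= sdhci_set_clock,
	.set_power	= foo_set_power,
	.set_bus_width	= sdhci_set_bus_width,
	.reset		= sdhci_reset,
};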
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 8d870ce9f944..d9a655f47d41 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
- mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+ mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
mmc->max_seg_size = mmc->max_req_size;
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 675435873823..7fb0c034dcb6 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
}
}
- if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
@@ -133,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
}
}
- if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 03f6e74c1906..0521b4662748 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1125,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
- mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+ mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b2752fe711f2..807c06e203c3 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev)
/* Set .max_segs to some random number. Feel free to adjust. */
mmc->max_segs = 32;
mmc->max_blk_size = 512;
- mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+ mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
/*
* Setting .max_seg_size to 1 page would simplify our page-mapping code,
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e2c0057737e6..7c887f111a7d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
break;
}
- page_cache_release(page);
+ put_page(page);
pages--;
index++;
}
@@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
return PTR_ERR(page);
memcpy(buf, page_address(page) + offset, cpylen);
- page_cache_release(page);
+ put_page(page);
if (retlen)
*retlen += cpylen;
@@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
unlock_page(page);
balance_dirty_pages_ratelimited(mapping);
}
- page_cache_release(page);
+ put_page(page);
if (retlen)
*retlen += cpylen;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index b6facac54fc0..557b8462f55e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4009,7 +4009,6 @@ static int nand_dt_init(struct nand_chip *chip)
* This is the first phase of the normal nand_scan() function. It reads the
* flash ID and sets up MTD fields accordingly.
*
- * The mtd->owner field must be set to the module of the caller.
*/
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
@@ -4429,19 +4428,12 @@ EXPORT_SYMBOL(nand_scan_tail);
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
*/
int nand_scan(struct mtd_info *mtd, int maxchips)
{
int ret;
- /* Many callers got this wrong, so check for it for a while... */
- if (!mtd->owner && caller_is_module()) {
- pr_crit("%s called with NULL mtd->owner!\n", __func__);
- BUG();
- }
-
ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1fd519503bb1..a58169a28741 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns)
int i;
for (i = 0; i < ns->held_cnt; i++)
- page_cache_release(ns->held_pages[i]);
+ put_page(ns->held_pages[i]);
}
/* Get page cache pages in advance to provide NOFS memory allocation */
@@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
struct page *page;
struct address_space *mapping = file->f_mapping;
- start_index = pos >> PAGE_CACHE_SHIFT;
- end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+ start_index = pos >> PAGE_SHIFT;
+ end_index = (pos + count - 1) >> PAGE_SHIFT;
if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
return -EINVAL;
ns->held_cnt = 0;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fa086e09d6b7..50454be86570 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2264,6 +2264,57 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
mutex_unlock(&ps->smi_mutex);
}
+static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
+ int reg, int val)
+{
+ int ret;
+
+ ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+ if (ret < 0)
+ goto restore_page_0;
+
+ ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
+restore_page_0:
+ _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+
+ return ret;
+}
+
+static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
+ int reg)
+{
+ int ret;
+
+ ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+ if (ret < 0)
+ goto restore_page_0;
+
+ ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
+restore_page_0:
+ _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+
+ return ret;
+}
+
+static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
+{
+ int ret;
+
+ ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
+ MII_BMCR);
+ if (ret < 0)
+ return ret;
+
+ if (ret & BMCR_PDOWN) {
+ ret &= ~BMCR_PDOWN;
+ ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
+ PAGE_FIBER_SERDES, MII_BMCR,
+ ret);
+ }
+
+ return ret;
+}
+
static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -2367,6 +2418,23 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
goto abort;
}
+ /* If this port is connected to a SerDes, make sure the SerDes is not
+ * powered down.
+ */
+ if (mv88e6xxx_6352_family(ds)) {
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+ if (ret < 0)
+ goto abort;
+ ret &= PORT_STATUS_CMODE_MASK;
+ if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
+ (ret == PORT_STATUS_CMODE_1000BASE_X) ||
+ (ret == PORT_STATUS_CMODE_SGMII)) {
+ ret = mv88e6xxx_power_on_serdes(ds);
+ if (ret < 0)
+ goto abort;
+ }
+ }
+
/* Port Control 2: don't force a good FCS, set the maximum frame size to
* 10240 bytes, disable 802.1q tags checking, don't discard tagged or
* untagged frames on this port, do a destination address lookup on all
@@ -2714,13 +2782,9 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
- if (ret < 0)
- goto error;
- ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
-error:
- _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+ ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
mutex_unlock(&ps->smi_mutex);
+
return ret;
}
@@ -2731,14 +2795,9 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
- if (ret < 0)
- goto error;
-
- ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
-error:
- _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+ ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
mutex_unlock(&ps->smi_mutex);
+
return ret;
}
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 9a038aba48fb..26a424acd10f 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -28,6 +28,10 @@
#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
#define SMI_DATA 0x01
+/* Fiber/SERDES Registers are located at SMI address F, page 1 */
+#define REG_FIBER_SERDES 0x0f
+#define PAGE_FIBER_SERDES 0x01
+
#define REG_PORT(p) (0x10 + (p))
#define PORT_STATUS 0x00
#define PORT_STATUS_PAUSE_EN BIT(15)
@@ -45,6 +49,10 @@
#define PORT_STATUS_MGMII BIT(6) /* 6185 */
#define PORT_STATUS_TX_PAUSED BIT(5)
#define PORT_STATUS_FLOW_CTRL BIT(4)
+#define PORT_STATUS_CMODE_MASK 0x0f
+#define PORT_STATUS_CMODE_100BASE_X 0x8
+#define PORT_STATUS_CMODE_1000BASE_X 0x9
+#define PORT_STATUS_CMODE_SGMII 0xa
#define PORT_PCS_CTRL 0x01
#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15)
#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aabbd51db981..12a009d720cd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2653,7 +2653,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Write request msg to hwrm channel */
__iowrite32_copy(bp->bar0, data, msg_len / 4);
- for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4)
+ for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
writel(0, bp->bar0 + i);
/* currently supports only one outstanding message */
@@ -3391,11 +3391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ cpr->cp_doorbell = bp->bar1 + i * 0x80;
rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
INVALID_STATS_CTX_ID);
if (rc)
goto err_out;
- cpr->cp_doorbell = bp->bar1 + i * 0x80;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
}
@@ -3830,6 +3830,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
struct hwrm_ver_get_input req = {0};
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req.hwrm_intf_min = HWRM_VERSION_MINOR;
@@ -3855,6 +3856,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+ if (resp->hwrm_intf_maj >= 1)
+ bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4555,7 +4559,7 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
- req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+ req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
req->enables |=
cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
} else {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index ec04c47172b7..709b95b8fcba 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -477,6 +477,7 @@ struct rx_tpa_end_cmp_ext {
#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
+#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define DFLT_HWRM_CMD_TIMEOUT 500
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -953,6 +954,7 @@ struct bnxt {
dma_addr_t hw_tx_port_stats_map;
int hw_port_stats_size;
+ u16 hwrm_max_req_len;
int hwrm_cmd_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ada1662b651..2e472f6dbf2d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -855,10 +855,8 @@ static void bnxt_get_pauseparam(struct net_device *dev,
if (BNXT_VF(bp))
return;
epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
- epause->rx_pause =
- ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0);
- epause->tx_pause =
- ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0);
+ epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
+ epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}
static int bnxt_set_pauseparam(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6746fd03cb3a..cf6445d148ca 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1171,6 +1171,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
unsigned int pkts_compl = 0;
+ unsigned int bytes_compl = 0;
unsigned int c_index;
unsigned int txbds_ready;
unsigned int txbds_processed = 0;
@@ -1193,16 +1194,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
if (tx_cb_ptr->skb) {
pkts_compl++;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
dma_unmap_len(tx_cb_ptr, dma_len),
DMA_TO_DEVICE);
bcmgenet_free_cb(tx_cb_ptr);
} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
- dev->stats.tx_bytes +=
- dma_unmap_len(tx_cb_ptr, dma_len);
dma_unmap_page(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
dma_unmap_len(tx_cb_ptr, dma_len),
@@ -1220,6 +1218,9 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->free_bds += txbds_processed;
ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+ dev->stats.tx_packets += pkts_compl;
+ dev->stats.tx_bytes += bytes_compl;
+
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
txq = netdev_get_tx_queue(dev, ring->queue);
if (netif_tx_queue_stopped(txq))
@@ -1296,7 +1297,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
tx_cb_ptr->skb = skb;
- skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+ skb_len = skb_headlen(skb);
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
@@ -1464,6 +1465,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ /* Retain how many bytes will be sent on the wire, without TSB inserted
+ * by transmit checksum offload
+ */
+ GENET_CB(skb)->bytes_sent = skb->len;
+
/* set the SKB transmit checksum */
if (priv->desc_64b_en) {
skb = bcmgenet_put_tx_csum(dev, skb);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 967367557309..1e2dc34d331a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -531,6 +531,12 @@ struct bcmgenet_hw_params {
u32 flags;
};
+struct bcmgenet_skb_cb {
+ unsigned int bytes_sent; /* bytes on the wire (no TSB) */
+};
+
+#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb))
+
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
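GENET_CB() uses the common trick of overlaying a driver-private structure on the 48-byte skb->cb scratch area so that per-packet metadata (here, the pre-TSB length) survives until TX reclaim. A sketch of the pattern; the BUILD_BUG_ON guard is added for illustration only and is not part of this patch:

/* Illustrative only; bcmgenet_xmit() in the hunk above does the real work. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* the private struct must fit inside skb->cb */
	BUILD_BUG_ON(sizeof(struct bcmgenet_skb_cb) > sizeof(skb->cb));

	GENET_CB(skb)->bytes_sent = skb->len;	/* length before the TSB is inserted */

	/* ... map buffers, fill descriptors, kick the ring ... */
	return NETDEV_TX_OK;
}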
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 6619178ed77b..48a7d7dee846 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -917,7 +917,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
unsigned int frag_len = bp->rx_buffer_size;
if (offset + frag_len > len) {
- BUG_ON(frag != last_frag);
+ if (unlikely(frag != last_frag)) {
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
@@ -945,8 +948,23 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
return 0;
}
+static inline void macb_init_rx_ring(struct macb *bp)
+{
+ dma_addr_t addr;
+ int i;
+
+ addr = bp->rx_buffers_dma;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ bp->rx_ring[i].addr = addr;
+ bp->rx_ring[i].ctrl = 0;
+ addr += bp->rx_buffer_size;
+ }
+ bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+}
+
static int macb_rx(struct macb *bp, int budget)
{
+ bool reset_rx_queue = false;
int received = 0;
unsigned int tail;
int first_frag = -1;
@@ -972,10 +990,18 @@ static int macb_rx(struct macb *bp, int budget)
if (ctrl & MACB_BIT(RX_EOF)) {
int dropped;
- BUG_ON(first_frag == -1);
+
+ if (unlikely(first_frag == -1)) {
+ reset_rx_queue = true;
+ continue;
+ }
dropped = macb_rx_frame(bp, first_frag, tail);
first_frag = -1;
+ if (unlikely(dropped < 0)) {
+ reset_rx_queue = true;
+ continue;
+ }
if (!dropped) {
received++;
budget--;
@@ -983,6 +1009,26 @@ static int macb_rx(struct macb *bp, int budget)
}
}
+ if (unlikely(reset_rx_queue)) {
+ unsigned long flags;
+ u32 ctrl;
+
+ netdev_err(bp->dev, "RX queue corruption: reset it\n");
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ ctrl = macb_readl(bp, NCR);
+ macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
+
+ macb_init_rx_ring(bp);
+ macb_writel(bp, RBQP, bp->rx_ring_dma);
+
+ macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return received;
+ }
+
if (first_frag != -1)
bp->rx_tail = first_frag;
else
@@ -1100,7 +1146,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(RXUBR));
+ queue_writel(queue, ISR, MACB_BIT(RXUBR));
}
if (status & MACB_BIT(ISR_ROVR)) {
@@ -1523,15 +1569,8 @@ static void gem_init_rings(struct macb *bp)
static void macb_init_rings(struct macb *bp)
{
int i;
- dma_addr_t addr;
- addr = bp->rx_buffers_dma;
- for (i = 0; i < RX_RING_SIZE; i++) {
- bp->rx_ring[i].addr = addr;
- bp->rx_ring[i].ctrl = 0;
- addr += bp->rx_buffer_size;
- }
- bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+ macb_init_rx_ring(bp);
for (i = 0; i < TX_RING_SIZE; i++) {
bp->queues[0].tx_ring[i].addr = 0;
@@ -2957,9 +2996,10 @@ static int macb_probe(struct platform_device *pdev)
phy_node = of_get_next_available_child(np, NULL);
if (phy_node) {
int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
- if (gpio_is_valid(gpio))
+ if (gpio_is_valid(gpio)) {
bp->reset_gpio = gpio_to_desc(gpio);
- gpiod_direction_output(bp->reset_gpio, 1);
+ gpiod_direction_output(bp->reset_gpio, 1);
+ }
}
of_node_put(phy_node);
@@ -3029,7 +3069,8 @@ static int macb_remove(struct platform_device *pdev)
mdiobus_free(bp->mii_bus);
/* Shutdown the PHY if there is a GPIO reset */
- gpiod_set_value(bp->reset_gpio, 0);
+ if (bp->reset_gpio)
+ gpiod_set_value(bp->reset_gpio, 0);
unregister_netdev(dev);
clk_disable_unprepare(bp->tx_clk);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 06bc2d2e7a73..a2cdfc1261dc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -166,6 +166,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
+ CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 37c081583084..08243c2ff4b4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -943,8 +943,8 @@ fec_restart(struct net_device *ndev)
else
val &= ~FEC_RACC_OPTIONS;
writel(val, fep->hwp + FEC_RACC);
+ writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
}
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
#endif
/*
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 37d0cce392be..e8d36aaea223 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -469,7 +469,7 @@ struct hnae_ae_ops {
u32 *tx_usecs, u32 *rx_usecs);
void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
u32 *tx_frames, u32 *rx_frames);
- void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
+ int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
int (*set_coalesce_frames)(struct hnae_handle *handle,
u32 coalesce_frames);
void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 285c893ab135..a1cb461ac45f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -159,11 +159,6 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
ring_pair_cb->used_by_vf = 1;
- if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
- ring_pair_cb->port_id_in_dsa = port_idx;
- else
- ring_pair_cb->port_id_in_dsa = 0;
-
ring_pair_cb++;
}
@@ -453,59 +448,46 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle,
static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
u32 *tx_usecs, u32 *rx_usecs)
{
- int port;
-
- port = hns_ae_map_eport_to_dport(handle->eport_id);
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
- *tx_usecs = hns_rcb_get_coalesce_usecs(
- hns_ae_get_dsaf_dev(handle->dev),
- hns_dsaf_get_comm_idx_by_port(port));
- *rx_usecs = hns_rcb_get_coalesce_usecs(
- hns_ae_get_dsaf_dev(handle->dev),
- hns_dsaf_get_comm_idx_by_port(port));
+ *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
+ *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
}
static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
u32 *tx_frames, u32 *rx_frames)
{
- int port;
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
- assert(handle);
-
- port = hns_ae_map_eport_to_dport(handle->eport_id);
-
- *tx_frames = hns_rcb_get_coalesced_frames(
- hns_ae_get_dsaf_dev(handle->dev), port);
- *rx_frames = hns_rcb_get_coalesced_frames(
- hns_ae_get_dsaf_dev(handle->dev), port);
+ *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
+ *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
}
-static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
- u32 timeout)
+static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
+ u32 timeout)
{
- int port;
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
- assert(handle);
-
- port = hns_ae_map_eport_to_dport(handle->eport_id);
-
- hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
- port, timeout);
+ return hns_rcb_set_coalesce_usecs(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
}
static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
u32 coalesce_frames)
{
- int port;
- int ret;
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
- assert(handle);
-
- port = hns_ae_map_eport_to_dport(handle->eport_id);
-
- ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
- port, coalesce_frames);
- return ret;
+ return hns_rcb_set_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, coalesce_frames);
}
void hns_ae_update_stats(struct hnae_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 6e2b76ede075..44abb08de155 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -664,7 +664,8 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
return;
for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
- snprintf(buff, ETH_GSTRING_LEN, g_gmac_stats_string[i].desc);
+ snprintf(buff, ETH_GSTRING_LEN, "%s",
+ g_gmac_stats_string[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 5c1ac9ba1bf2..5978a5c8ef35 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2219,17 +2219,17 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
/* dsaf onode registers */
for (i = 0; i < DSAF_XOD_NUM; i++) {
p[311 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
p[319 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
p[327 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
p[335 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
p[343 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
p[351 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_TOKEN_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
}
p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 607c3be42241..e69b02287c44 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -244,31 +244,35 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
*/
phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
{
- u32 hilink3_mode;
- u32 hilink4_mode;
+ u32 mode;
+ u32 reg;
+ u32 shift;
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
- int dev_id = mac_cb->mac_id;
+ int mac_id = mac_cb->mac_id;
phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
- hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG);
- hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG);
- if (dev_id >= 0 && dev_id <= 3) {
- if (hilink4_mode == 0)
- phy_if = PHY_INTERFACE_MODE_SGMII;
- else
+ if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) {
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ } else if (mac_id >= 0 && mac_id <= 3) {
+ reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG;
+ mode = dsaf_read_reg(sys_ctl_vaddr, reg);
+ /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */
+ shift = is_ver1 ? 0 : mac_id;
+ if (dsaf_get_bit(mode, shift))
phy_if = PHY_INTERFACE_MODE_XGMII;
- } else if (dev_id >= 4 && dev_id <= 5) {
- if (hilink3_mode == 0)
- phy_if = PHY_INTERFACE_MODE_SGMII;
else
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ } else if (mac_id >= 4 && mac_id <= 7) {
+ reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG;
+ mode = dsaf_read_reg(sys_ctl_vaddr, reg);
+ /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */
+ shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6;
+ if (dsaf_get_bit(mode, shift))
phy_if = PHY_INTERFACE_MODE_XGMII;
- } else {
- phy_if = PHY_INTERFACE_MODE_SGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_SGMII;
}
-
- dev_dbg(mac_cb->dev,
- "hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
- hilink3_mode, hilink4_mode, dev_id, phy_if);
return phy_if;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 12188807468c..28ee26e5c478 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -215,9 +215,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
bd_size_type);
dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
- ring_pair->port_id_in_dsa);
+ ring_pair->port_id_in_comm);
dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
- ring_pair->port_id_in_dsa);
+ ring_pair->port_id_in_comm);
} else {
dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
(u32)dma);
@@ -227,9 +227,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
bd_size_type);
dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
- ring_pair->port_id_in_dsa);
+ ring_pair->port_id_in_comm);
dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
- ring_pair->port_id_in_dsa);
+ ring_pair->port_id_in_comm);
}
}
@@ -256,50 +256,16 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
desc_cnt);
}
-/**
- *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames
- *@rcb_common: rcb_common device
- *@port_idx:port index
- *@coalesced_frames:BD num for coalesced frames
- */
-static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
- u32 port_idx,
- u32 coalesced_frames)
-{
- if (coalesced_frames >= rcb_common->desc_num ||
- coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
- return -EINVAL;
-
- dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
- coalesced_frames);
- return 0;
-}
-
-/**
- *hns_rcb_get_port_coalesced_frames - set rcb port coalesced frames
- *@rcb_common: rcb_common device
- *@port_idx:port index
- * return coaleseced frames value
- */
-static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
- u32 port_idx)
+static void hns_rcb_set_port_timeout(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
- if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
- port_idx = 0;
-
- return dsaf_read_dev(rcb_common,
- RCB_CFG_PKTLINE_REG + port_idx * 4);
-}
-
-/**
- *hns_rcb_set_timeout - set rcb port coalesced time_out
- *@rcb_common: rcb_common device
- *@time_out:time for coalesced time_out
- */
-static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
- u32 timeout)
-{
- dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+ dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
+ timeout * HNS_RCB_CLK_FREQ_MHZ);
+ else
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+ timeout);
}
static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
@@ -361,10 +327,11 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
for (i = 0; i < port_num; i++) {
hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
- (void)hns_rcb_set_port_coalesced_frames(
- rcb_common, i, rcb_common->coalesced_frames);
+ (void)hns_rcb_set_coalesced_frames(
+ rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
+ hns_rcb_set_port_timeout(
+ rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
}
- hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
HNS_RCB_COMMON_ENDIAN);
@@ -460,7 +427,8 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}
-static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
+static int hns_rcb_get_port_in_comm(
+ struct rcb_common_cb *rcb_common, int ring_idx)
{
int comm_index = rcb_common->comm_index;
int port;
@@ -470,7 +438,7 @@ static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
port = ring_idx / q_num;
} else {
- port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1;
+ port = 0; /* debug ports use port_id_in_comm 0 */
}
return port;
@@ -518,7 +486,8 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
ring_pair_cb->index = i;
ring_pair_cb->q.io_base =
RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
- ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
+ ring_pair_cb->port_id_in_comm =
+ hns_rcb_get_port_in_comm(rcb_common, i);
ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
@@ -534,82 +503,95 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
/**
*hns_rcb_get_coalesced_frames - get rcb port coalesced frames
*@rcb_common: rcb_common device
- *@comm_index:port index
- *return coalesced_frames
+ *@port_idx:port id in comm
+ *
+ *Returns: coalesced_frames
*/
-u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
+u32 hns_rcb_get_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
{
- int comm_index = hns_dsaf_get_comm_idx_by_port(port);
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
-
- return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+ return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}
/**
*hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
*@rcb_common: rcb_common device
- *@comm_index:port index
- *return time_out
+ *@port_idx:port id in comm
+ *
+ *Returns: time_out
*/
-u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
+u32 hns_rcb_get_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
{
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
-
- return rcb_comm->timeout;
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+ return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
+ HNS_RCB_CLK_FREQ_MHZ;
+ else
+ return dsaf_read_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}
/**
*hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
*@rcb_common: rcb_common device
- *@comm_index: comm :index
- *@etx_usecs:tx time for coalesced time_out
- *@rx_usecs:rx time for coalesced time_out
+ *@port_idx:port id in comm
+ *@timeout:tx/rx time for coalesced time_out
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
*/
-void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
- int port, u32 timeout)
+int hns_rcb_set_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
- int comm_index = hns_dsaf_get_comm_idx_by_port(port);
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+ u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);
- if (rcb_comm->timeout == timeout)
- return;
+ if (timeout == old_timeout)
+ return 0;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- dev_err(dsaf_dev->dev,
- "error: not support coalesce_usecs setting!\n");
- return;
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support coalesce_usecs setting!\n");
+ return -EINVAL;
+ }
}
- rcb_comm->timeout = timeout;
- hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
+ if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support coalesce %dus!\n", timeout);
+ return -EINVAL;
+ }
+ hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+ return 0;
}
/**
*hns_rcb_set_coalesced_frames - set rcb coalesced frames
*@rcb_common: rcb_common device
- *@tx_frames:tx BD num for coalesced frames
- *@rx_frames:rx BD num for coalesced frames
- *Return 0 on success, negative on failure
+ *@port_idx:port id in comm
+ *@coalesced_frames:tx/rx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
*/
-int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
- int port, u32 coalesced_frames)
+int hns_rcb_set_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
- int comm_index = hns_dsaf_get_comm_idx_by_port(port);
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
- u32 coalesced_reg_val;
- int ret;
+ u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
- coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port);
-
- if (coalesced_reg_val == coalesced_frames)
+ if (coalesced_frames == old_waterline)
return 0;
- if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) {
- ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port,
- coalesced_frames);
- return ret;
- } else {
+ if (coalesced_frames >= rcb_common->desc_num ||
+ coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
+ coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support coalesce_frames setting!\n");
return -EINVAL;
}
+
+ dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
+ coalesced_frames);
+ return 0;
}
/**
@@ -749,8 +731,6 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
rcb_common->dsaf_dev = dsaf_dev;
rcb_common->desc_num = dsaf_dev->desc_num;
- rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
- rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
rcb_common->max_vfn = max_vfn;
@@ -951,6 +931,10 @@ void hns_rcb_get_strings(int stringset, u8 *data, int index)
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
u32 *regs = data;
+ bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
+ bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
+ u32 reg_tmp;
+ u32 reg_num_tmp;
u32 i = 0;
/*rcb common registers */
@@ -1004,12 +988,16 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
}
- regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
- regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
- regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
+ reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
+ reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
+ for (i = 0; i < reg_num_tmp; i++)
+ regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);
+
+ regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
+ regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
/* mark end of rcb common regs */
- for (i = 73; i < 80; i++)
+ for (i = 78; i < 80; i++)
regs[i] = 0xcccccccc;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 81fe9f849973..eb61014ad615 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -38,7 +38,9 @@ struct rcb_common_cb;
#define HNS_RCB_MAX_COALESCED_FRAMES 1023
#define HNS_RCB_MIN_COALESCED_FRAMES 1
#define HNS_RCB_DEF_COALESCED_FRAMES 50
-#define HNS_RCB_MAX_TIME_OUT 0x500
+#define HNS_RCB_CLK_FREQ_MHZ 350
+#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
+#define HNS_RCB_DEF_COALESCED_USECS 3
#define HNS_RCB_COMMON_ENDIAN 1
@@ -82,7 +84,7 @@ struct ring_pair_cb {
int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
- u8 port_id_in_dsa;
+ u8 port_id_in_comm;
u8 used_by_vf;
struct hns_ring_hw_stats hw_stats;
@@ -97,8 +99,6 @@ struct rcb_common_cb {
u8 comm_index;
u32 ring_num;
- u32 coalesced_frames; /* frames threshold of rx interrupt */
- u32 timeout; /* time threshold of rx interrupt */
u32 desc_num; /* desc num per queue*/
struct ring_pair_cb ring_pair_cb[0];
@@ -125,13 +125,14 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
void hns_rcb_init_hw(struct ring_pair_cb *ring);
void hns_rcb_reset_ring_hw(struct hnae_queue *q);
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
-
-u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index);
-u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index);
-void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
- int comm_index, u32 timeout);
-int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
- int comm_index, u32 coalesce_frames);
+u32 hns_rcb_get_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+int hns_rcb_set_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
+int hns_rcb_set_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
void hns_rcb_update_stats(struct hnae_queue *queue);
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
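The reworked RCB interface takes per-port coalescing settings in the units ethtool already uses (microseconds, bounded by HNS_RCB_MAX_COALESCED_USECS, and frames, bounded by HNS_RCB_MIN/MAX_COALESCED_FRAMES) and now returns an error instead of silently ignoring bad values. A sketch of how the int return propagates up through the hnae ops, mirroring the hns_set_coalesce() hunk further below; the wrapper name and error comments are illustrative, not part of the patch:

/* Illustrative wrapper; the real path is hns_set_coalesce() ->
 * hns_ae_set_coalesce_usecs() -> hns_rcb_set_coalesce_usecs(). */
static int example_set_coalesce(struct hnae_handle *h, u32 usecs, u32 frames)
{
	int ret;

	ret = h->dev->ops->set_coalesce_usecs(h, usecs);
	if (ret)	/* e.g. -EINVAL when usecs exceeds HNS_RCB_MAX_COALESCED_USECS */
		return ret;

	return h->dev->ops->set_coalesce_frames(h, frames);
}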
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index bf62687e5ea7..7d7204f45e78 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -103,6 +103,8 @@
/*serdes offset**/
#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
+#define HNS_MAC_HILINK3V2_REG DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG
+#define HNS_MAC_HILINK4V2_REG DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG
#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
@@ -404,6 +406,7 @@
#define RCB_CFG_OVERTIME_REG 0x9300
#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
+#define RCB_PORT_CFG_OVERTIME_REG 0x9430
#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 71aa37b4b338..687204b780b0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -913,10 +913,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
- int head = ring->next_to_clean;
-
- /* for hardware bug fixed */
- head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head != ring->next_to_clean) {
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -959,8 +956,8 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 0);
-
- ring_data->fini_process(ring_data);
+ if (ring_data->fini_process)
+ ring_data->fini_process(ring_data);
return 0;
}
@@ -1723,6 +1720,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;
struct hns_nic_ring_data *rd;
+ bool is_ver1 = AE_IS_VER1(priv->enet_ver);
int i;
if (h->q_num > NIC_MAX_Q_PER_VF) {
@@ -1740,7 +1738,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->queue_index = i;
rd->ring = &h->qs[i]->tx_ring;
rd->poll_one = hns_nic_tx_poll_one;
- rd->fini_process = hns_nic_tx_fini_pro;
+ rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1752,7 +1750,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->ring = &h->qs[i - h->q_num]->rx_ring;
rd->poll_one = hns_nic_rx_poll_one;
rd->ex_process = hns_nic_rx_up_pro;
- rd->fini_process = hns_nic_rx_fini_pro;
+ rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
@@ -1816,7 +1814,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
h = hnae_get_handle(&priv->netdev->dev,
priv->ae_node, priv->port_id, NULL);
if (IS_ERR_OR_NULL(h)) {
- ret = PTR_ERR(h);
+ ret = -ENODEV;
dev_dbg(priv->dev, "has not handle, register notifier!\n");
goto out;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 9c3ba65988e1..3d746c887873 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -794,8 +794,10 @@ static int hns_set_coalesce(struct net_device *net_dev,
(!ops->set_coalesce_frames))
return -ESRCH;
- ops->set_coalesce_usecs(priv->ae_handle,
- ec->rx_coalesce_usecs);
+ ret = ops->set_coalesce_usecs(priv->ae_handle,
+ ec->rx_coalesce_usecs);
+ if (ret)
+ return ret;
ret = ops->set_coalesce_frames(
priv->ae_handle,
@@ -1013,8 +1015,8 @@ int hns_phy_led_set(struct net_device *netdev, int value)
struct phy_device *phy_dev = priv->phy;
retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
- retval = phy_write(phy_dev, HNS_LED_FC_REG, value);
- retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+ retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
+ retval |= phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
if (retval) {
netdev_err(netdev, "mdiobus_write fail !\n");
return retval;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3fc7bde699ba..ae90d4f12b70 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3106,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev,
return __e1000_maybe_stop_tx(netdev, size);
}
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
+#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -3256,12 +3256,29 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
nr_frags, mss);
if (count) {
+ /* The number of descriptors needed is higher than in other Intel
+ * drivers due to a number of workarounds. The breakdown is below:
+ * Data descriptors: MAX_SKB_FRAGS + 1
+ * Context Descriptor: 1
+ * Keep head from touching tail: 2
+ * Workarounds: 3
+ */
+ int desc_needed = MAX_SKB_FRAGS + 7;
+
netdev_sent_queue(netdev, skb->len);
skb_tx_timestamp(skb);
e1000_tx_queue(adapter, tx_ring, tx_flags, count);
+
+ /* 82544 potentially requires twice as many data descriptors
+ * in order to guarantee buffers don't end on evenly-aligned
+ * dwords
+ */
+ if (adapter->pcix_82544)
+ desc_needed += MAX_SKB_FRAGS + 1;
+
/* Make sure there is space in the ring for the next send. */
- e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+ e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
if (!skb->xmit_more ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
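A note on the TXD_USE_COUNT change above: the old form, ((S) >> (X)) + 1, unconditionally adds one descriptor and therefore over-counts whenever the size is an exact multiple of 1 << X, while the new form is a plain round-up division. The short userspace sketch below is not driver code; the sizes are made-up values and X = 12 is only an example shift:

#include <stdio.h>

/* old form: shift, then unconditionally add one descriptor */
#define TXD_USE_COUNT_OLD(S, X) (((S) >> (X)) + 1)
/* new form: round up to the next multiple of 2^X, then shift */
#define TXD_USE_COUNT_NEW(S, X) (((S) + ((1 << (X)) - 1)) >> (X))

int main(void)
{
	unsigned int sizes[] = { 1, 4095, 4096, 4097, 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %5u -> old %u, new %u\n", sizes[i],
		       TXD_USE_COUNT_OLD(sizes[i], 12),
		       TXD_USE_COUNT_NEW(sizes[i], 12));
	return 0; /* old over-counts at the exact multiples 4096 and 8192 */
}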
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 67006431726a..344912957cab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8559,6 +8559,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
I40E_FLAG_WB_ON_ITR_CAPABLE |
I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
+ I40E_FLAG_NO_PCI_LINK_CHECK |
I40E_FLAG_100M_SGMII_CAPABLE |
I40E_FLAG_USE_SET_LLDP_MIB |
I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 84fa28ceb200..e4949af7dd6b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -661,9 +661,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
-#ifdef CONFIG_IXGBE_VXLAN
#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
-#endif
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
/* Tx fast path data */
@@ -675,6 +673,9 @@ struct ixgbe_adapter {
int num_rx_queues;
u16 rx_itr_setting;
+ /* Port number used to identify VXLAN traffic */
+ __be16 vxlan_port;
+
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -782,9 +783,6 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
-#ifdef CONFIG_IXGBE_VXLAN
- u16 vxlan_port;
-#endif
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
@@ -879,6 +877,8 @@ extern const char ixgbe_driver_version[];
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
+int ixgbe_open(struct net_device *netdev);
+int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 726e0eeee63b..b3530e1e3ce1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2053,7 +2053,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ ixgbe_close(netdev);
else
ixgbe_reset(adapter);
@@ -2091,7 +2091,7 @@ skip_loopback:
/* clear testing bit and return adapter to previous state */
clear_bit(__IXGBE_TESTING, &adapter->state);
if (if_running)
- dev_open(netdev);
+ ixgbe_open(netdev);
else if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
} else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 569cb0757c93..7df3fe29b210 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4531,9 +4531,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
-#ifdef CONFIG_IXGBE_VXLAN
adapter->vxlan_port = 0;
-#endif
break;
default:
break;
@@ -5994,7 +5992,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int ixgbe_open(struct net_device *netdev)
+int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -6096,7 +6094,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int ixgbe_close(struct net_device *netdev)
+int ixgbe_close(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7560,11 +7558,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
struct ipv6hdr *ipv6;
} hdr;
struct tcphdr *th;
+ unsigned int hlen;
struct sk_buff *skb;
-#ifdef CONFIG_IXGBE_VXLAN
- u8 encap = false;
-#endif /* CONFIG_IXGBE_VXLAN */
__be16 vlan_id;
+ int l4_proto;
/* if ring doesn't have a interrupt vector, cannot perform ATR */
if (!q_vector)
@@ -7576,62 +7573,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
ring->atr_count++;
+ /* currently only IPv4/IPv6 with TCP is supported */
+ if ((first->protocol != htons(ETH_P_IP)) &&
+ (first->protocol != htons(ETH_P_IPV6)))
+ return;
+
/* snag network header to get L4 type and address */
skb = first->skb;
hdr.network = skb_network_header(skb);
- if (!skb->encapsulation) {
- th = tcp_hdr(skb);
- } else {
#ifdef CONFIG_IXGBE_VXLAN
+ if (skb->encapsulation &&
+ first->protocol == htons(ETH_P_IP) &&
+ hdr.ipv4->protocol != IPPROTO_UDP) {
struct ixgbe_adapter *adapter = q_vector->adapter;
- if (!adapter->vxlan_port)
- return;
- if (first->protocol != htons(ETH_P_IP) ||
- hdr.ipv4->version != IPVERSION ||
- hdr.ipv4->protocol != IPPROTO_UDP) {
- return;
- }
- if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
- return;
- encap = true;
- hdr.network = skb_inner_network_header(skb);
- th = inner_tcp_hdr(skb);
-#else
- return;
-#endif /* CONFIG_IXGBE_VXLAN */
+ /* verify the port is recognized as VXLAN */
+ if (adapter->vxlan_port &&
+ udp_hdr(skb)->dest == adapter->vxlan_port)
+ hdr.network = skb_inner_network_header(skb);
}
+#endif /* CONFIG_IXGBE_VXLAN */
/* Currently only IPv4/IPv6 with TCP is supported */
switch (hdr.ipv4->version) {
case IPVERSION:
- if (hdr.ipv4->protocol != IPPROTO_TCP)
- return;
+ /* access ihl as u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ l4_proto = hdr.ipv4->protocol;
break;
case 6:
- if (likely((unsigned char *)th - hdr.network ==
- sizeof(struct ipv6hdr))) {
- if (hdr.ipv6->nexthdr != IPPROTO_TCP)
- return;
- } else {
- __be16 frag_off;
- u8 l4_hdr;
-
- ipv6_skip_exthdr(skb, hdr.network - skb->data +
- sizeof(struct ipv6hdr),
- &l4_hdr, &frag_off);
- if (unlikely(frag_off))
- return;
- if (l4_hdr != IPPROTO_TCP)
- return;
- }
+ hlen = hdr.network - skb->data;
+ l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
+ hlen -= hdr.network - skb->data;
break;
default:
return;
}
- /* skip this packet since it is invalid or the socket is closing */
- if (!th || th->fin)
+ if (l4_proto != IPPROTO_TCP)
+ return;
+
+ th = (struct tcphdr *)(hdr.network + hlen);
+
+ /* skip this packet since the socket is closing */
+ if (th->fin)
return;
/* sample on all syn packets or once every atr sample count */
@@ -7682,10 +7667,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
break;
}
-#ifdef CONFIG_IXGBE_VXLAN
- if (encap)
+ if (hdr.network != skb_network_header(skb))
input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
-#endif /* CONFIG_IXGBE_VXLAN */
/* This assumes the Rx queue and Tx queue are bound to the same CPU */
ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
@@ -8209,10 +8192,17 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls)
{
+ u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
+ u32 loc;
int err;
+ if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
+ return -EINVAL;
+
+ loc = cls->knode.handle & 0xfffff;
+
spin_lock(&adapter->fdir_perfect_lock);
- err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle);
+ err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
spin_unlock(&adapter->fdir_perfect_lock);
return err;
}
@@ -8221,20 +8211,30 @@ static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
__be16 protocol,
struct tc_cls_u32_offload *cls)
{
+ u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+ if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return -EINVAL;
+
/* This ixgbe devices do not support hash tables at the moment
* so abort when given hash tables.
*/
if (cls->hnode.divisor > 0)
return -EINVAL;
- set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables);
+ set_bit(uhtid - 1, &adapter->tables);
return 0;
}
static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls)
{
- clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables);
+ u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+ if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return -EINVAL;
+
+ clear_bit(uhtid - 1, &adapter->tables);
return 0;
}
@@ -8252,27 +8252,29 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
#endif
int i, err = 0;
u8 queue;
- u32 handle;
+ u32 uhtid, link_uhtid;
memset(&mask, 0, sizeof(union ixgbe_atr_input));
- handle = cls->knode.handle;
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+ link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
- /* At the moment cls_u32 jumps to transport layer and skips past
+ /* At the moment cls_u32 jumps to network layer and skips past
* L2 headers. The canonical method to match L2 frames is to use
* negative values. However this is error prone at best but really
* just broken because there is no way to "know" what sort of hdr
- * is in front of the transport layer. Fix cls_u32 to support L2
+ * is in front of the network layer. Fix cls_u32 to support L2
* headers when needed.
*/
if (protocol != htons(ETH_P_IP))
return -EINVAL;
- if (cls->knode.link_handle ||
- cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) {
+ if (link_uhtid) {
struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
- u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle);
- if (!test_bit(uhtid, &adapter->tables))
+ if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return -EINVAL;
+
+ if (!test_bit(link_uhtid - 1, &adapter->tables))
return -EINVAL;
for (i = 0; nexthdr[i].jump; i++) {
@@ -8288,10 +8290,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
nexthdr->mask != cls->knode.sel->keys[0].mask)
return -EINVAL;
- if (uhtid >= IXGBE_MAX_LINK_HANDLE)
- return -EINVAL;
-
- adapter->jump_tables[uhtid] = nexthdr->jump;
+ adapter->jump_tables[link_uhtid] = nexthdr->jump;
}
return 0;
}
@@ -8308,13 +8307,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
* To add support for new nodes update ixgbe_model.h parse structures
* this function _should_ be generic try not to hardcode values here.
*/
- if (TC_U32_USERHTID(handle) == 0x800) {
+ if (uhtid == 0x800) {
field_ptr = adapter->jump_tables[0];
} else {
- if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables))
+ if (uhtid >= IXGBE_MAX_LINK_HANDLE)
return -EINVAL;
- field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)];
+ field_ptr = adapter->jump_tables[uhtid];
}
if (!field_ptr)
@@ -8332,8 +8331,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
int j;
for (j = 0; field_ptr[j].val; j++) {
- if (field_ptr[j].off == off &&
- field_ptr[j].mask == m) {
+ if (field_ptr[j].off == off) {
field_ptr[j].val(input, &mask, val, m);
input->filter.formatted.flow_type |=
field_ptr[j].type;
@@ -8393,8 +8391,8 @@ err_out:
return -EINVAL;
}
-int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -8554,7 +8552,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- u16 new_port = ntohs(port);
if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
return;
@@ -8562,18 +8559,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
if (sa_family == AF_INET6)
return;
- if (adapter->vxlan_port == new_port)
+ if (adapter->vxlan_port == port)
return;
if (adapter->vxlan_port) {
netdev_info(dev,
"Hit Max num of VXLAN ports, not adding port %d\n",
- new_port);
+ ntohs(port));
return;
}
- adapter->vxlan_port = new_port;
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
+ adapter->vxlan_port = port;
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
}
/**
@@ -8586,7 +8583,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
__be16 port)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- u16 new_port = ntohs(port);
if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
return;
@@ -8594,9 +8590,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
if (sa_family == AF_INET6)
return;
- if (adapter->vxlan_port != new_port) {
+ if (adapter->vxlan_port != port) {
netdev_info(dev, "Port %d was not found, not deleting\n",
- new_port);
+ ntohs(port));
return;
}
@@ -9265,17 +9261,6 @@ skip_sriov:
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
-#ifdef CONFIG_IXGBE_VXLAN
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- netdev->hw_enc_features |= NETIF_F_RXCSUM;
- break;
- default:
- break;
- }
-#endif /* CONFIG_IXGBE_VXLAN */
-
#ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops;
#endif
@@ -9329,6 +9314,8 @@ skip_sriov:
goto err_sw_init;
}
+ /* Set hw->mac.addr to permanent MAC address */
+ ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter);
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index ce48872d4782..74c53ad9d268 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -32,7 +32,6 @@
struct ixgbe_mat_field {
unsigned int off;
- unsigned int mask;
int (*val)(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m);
@@ -58,35 +57,27 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
}
static struct ixgbe_mat_field ixgbe_ipv4_fields[] = {
- { .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip,
+ { .off = 12, .val = ixgbe_mat_prgm_sip,
.type = IXGBE_ATR_FLOW_TYPE_IPV4},
- { .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip,
+ { .off = 16, .val = ixgbe_mat_prgm_dip,
.type = IXGBE_ATR_FLOW_TYPE_IPV4},
{ .val = NULL } /* terminal node */
};
-static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input,
+static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
{
input->filter.formatted.src_port = val & 0xffff;
mask->formatted.src_port = m & 0xffff;
- return 0;
-};
+ input->filter.formatted.dst_port = val >> 16;
+ mask->formatted.dst_port = m >> 16;
-static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input,
- union ixgbe_atr_input *mask,
- u32 val, u32 m)
-{
- input->filter.formatted.dst_port = val & 0xffff;
- mask->formatted.dst_port = m & 0xffff;
return 0;
};
static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
- {.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport,
- .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
- {.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport,
+ {.off = 0, .val = ixgbe_mat_prgm_ports,
.type = IXGBE_ATR_FLOW_TYPE_TCPV4},
{ .val = NULL } /* terminal node */
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 87aca3f7c3de..68a9c646498e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -355,7 +355,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
break;
- usleep_range(10, 20);
+ udelay(10);
}
if (ctrl)
*ctrl = command;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c48aef613b0a..d7aa4b203f40 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -680,7 +680,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ ixgbevf_close(netdev);
else
ixgbevf_reset(adapter);
@@ -692,7 +692,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
clear_bit(__IXGBEVF_TESTING, &adapter->state);
if (if_running)
- dev_open(netdev);
+ ixgbevf_open(netdev);
} else {
hw_dbg(&adapter->hw, "online testing starting\n");
/* Online tests */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 68ec7daa04fd..991eeae81473 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -486,6 +486,8 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
+int ixgbevf_open(struct net_device *netdev);
+int ixgbevf_close(struct net_device *netdev);
void ixgbevf_up(struct ixgbevf_adapter *adapter);
void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0ea14c0a2e74..b0edae94d73d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3122,7 +3122,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int ixgbevf_open(struct net_device *netdev)
+int ixgbevf_open(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3205,7 +3205,7 @@ err_setup_reset:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int ixgbevf_close(struct net_device *netdev)
+int ixgbevf_close(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -3692,19 +3692,23 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
+ int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
- ether_addr_copy(hw->mac.addr, addr->sa_data);
-
spin_lock_bh(&adapter->mbx_lock);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
spin_unlock_bh(&adapter->mbx_lock);
+ if (err)
+ return -EPERM;
+
+ ether_addr_copy(hw->mac.addr, addr->sa_data);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 61a98f4c5746..4d613a4f2a7f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -408,8 +408,10 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
- (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+ return IXGBE_ERR_MBX;
+ }
return ret_val;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 577f7ca7deba..7fc490225da5 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -260,7 +260,6 @@
#define MVNETA_VLAN_TAG_LEN 4
-#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
#define MVNETA_TX_CSUM_DEF_SIZE 1600
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT1 1
@@ -300,7 +299,7 @@
#define MVNETA_RX_PKT_SIZE(mtu) \
ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
ETH_HLEN + ETH_FCS_LEN, \
- MVNETA_CPU_D_CACHE_LINE_SIZE)
+ cache_line_size())
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_phys) && \
@@ -2764,9 +2763,6 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
if (rxq->descs == NULL)
return -ENOMEM;
- BUG_ON(rxq->descs !=
- PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
-
rxq->last_desc = rxq->size - 1;
/* Set Rx descriptors queue starting address */
@@ -2837,10 +2833,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
if (txq->descs == NULL)
return -ENOMEM;
- /* Make sure descriptor address is cache line size aligned */
- BUG_ON(txq->descs !=
- PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
-
txq->last_desc = txq->size - 1;
/* Set maximum bandwidth for enabled TXQs */
@@ -3050,6 +3042,20 @@ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
return mtu;
}
+static void mvneta_percpu_enable(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ disable_percpu_irq(pp->dev->irq);
+}
+
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
@@ -3074,6 +3080,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
* reallocation of the queues
*/
mvneta_stop_dev(pp);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
mvneta_cleanup_txqs(pp);
mvneta_cleanup_rxqs(pp);
@@ -3097,6 +3104,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
return ret;
}
+ on_each_cpu(mvneta_percpu_enable, pp, true);
mvneta_start_dev(pp);
mvneta_port_up(pp);
@@ -3250,20 +3258,6 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
pp->phy_dev = NULL;
}
-static void mvneta_percpu_enable(void *arg)
-{
- struct mvneta_port *pp = arg;
-
- enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
-}
-
-static void mvneta_percpu_disable(void *arg)
-{
- struct mvneta_port *pp = arg;
-
- disable_percpu_irq(pp->dev->irq);
-}
-
/* Electing a CPU must be done in an atomic way: it should be done
* after or before the removal/insertion of a CPU and this function is
* not reentrant.
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index c797971aefab..868a957f24bb 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -321,7 +321,6 @@
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa
-#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
#define MVPP2_TX_CSUM_MAX_SIZE 9800
/* Timeout constants */
@@ -377,7 +376,7 @@
#define MVPP2_RX_PKT_SIZE(mtu) \
ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
- ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
+ ETH_HLEN + ETH_FCS_LEN, cache_line_size())
#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
@@ -4493,10 +4492,6 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
if (!aggr_txq->descs)
return -ENOMEM;
- /* Make sure descriptor address is cache line size aligned */
- BUG_ON(aggr_txq->descs !=
- PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
aggr_txq->last_desc = aggr_txq->size - 1;
/* Aggr TXQ no reset WA */
@@ -4526,9 +4521,6 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
if (!rxq->descs)
return -ENOMEM;
- BUG_ON(rxq->descs !=
- PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
rxq->last_desc = rxq->size - 1;
/* Zero occupied and non-occupied counters - direct access */
@@ -4616,10 +4608,6 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
if (!txq->descs)
return -ENOMEM;
- /* Make sure descriptor address is cache line size aligned */
- BUG_ON(txq->descs !=
- PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */
@@ -6059,8 +6047,10 @@ static int mvpp2_port_init(struct mvpp2_port *port)
/* Map physical Rx queue to port's logical Rx queue */
rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
- if (!rxq)
+ if (!rxq) {
+ err = -ENOMEM;
goto err_free_percpu;
+ }
/* Map this Rx queue to a physical queue */
rxq->id = port->first_rxq + queue;
rxq->port = port->id;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index ffd0accc2ec9..2017b0121f5f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2750,7 +2750,7 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_int_mode int_mode)
{
- int rc;
+ int rc = 0;
/* Configure AEU signal change to produce attentions */
qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index ef332708e5f2..6d31f92ef2b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "1.00.00.34"
+#define DRV_VERSION "1.00.00.35"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4e1a7dba7c4a..087e14a3fba7 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1377,11 +1377,11 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* TAG and timestamp required flag */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- skb_tx_timestamp(skb);
desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
}
+ skb_tx_timestamp(skb);
/* Descriptor type must be set after all the above writes */
dma_wmb();
desc->die_dt = DT_FEND;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index b02eed12bfc5..73427e29df2a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -155,11 +155,11 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
return 0;
err_rx_irq_unmap:
- while (--i)
+ while (i--)
irq_dispose_mapping(priv->rxq[i]->irq_no);
i = SXGBE_TX_QUEUES;
err_tx_irq_unmap:
- while (--i)
+ while (i--)
irq_dispose_mapping(priv->txq[i]->irq_no);
irq_dispose_mapping(priv->irq);
err_drv_remove:
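The error-unwind fix above is the classic --i versus i-- off-by-one: with a pre-decrement the loop never touches index 0 and does nothing at all when the failure happens at i == 1, while the post-decrement form walks every already-mapped entry back down to 0 and correctly does nothing for i == 0. A minimal standalone sketch, plain userspace C with a stand-in cleanup() instead of irq_dispose_mapping():

#include <stdio.h>

static void cleanup(int idx)
{
	printf("  dispose %d\n", idx);
}

int main(void)
{
	int i;

	i = 3; /* three entries were mapped before the failure */
	printf("while (--i):\n");
	while (--i) /* buggy: handles 2 and 1, skips index 0 */
		cleanup(i);

	i = 3;
	printf("while (i--):\n");
	while (i--) /* fixed: handles 2, 1 and 0 */
		cleanup(i);

	return 0;
}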
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index e13228f115f0..011386f6f24d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -199,11 +199,6 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
{
unsigned int tdes1 = p->des1;
- if (mode == STMMAC_CHAIN_MODE)
- norm_set_tx_desc_len_on_chain(p, len);
- else
- norm_set_tx_desc_len_on_ring(p, len);
-
if (is_fs)
tdes1 |= TDES1_FIRST_SEGMENT;
else
@@ -217,10 +212,15 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
if (ls)
tdes1 |= TDES1_LAST_SEGMENT;
- if (tx_own)
- tdes1 |= TDES0_OWN;
-
p->des1 = tdes1;
+
+ if (mode == STMMAC_CHAIN_MODE)
+ norm_set_tx_desc_len_on_chain(p, len);
+ else
+ norm_set_tx_desc_len_on_ring(p, len);
+
+ if (tx_own)
+ p->des0 |= TDES0_OWN;
}
static void ndesc_set_tx_ic(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4c5ce9848ca9..fcbd4be562e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -278,7 +278,6 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
- char *phy_bus_name = priv->plat->phy_bus_name;
unsigned long flags;
bool ret = false;
@@ -289,10 +288,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
(priv->pcs == STMMAC_PCS_RTBI))
goto out;
- /* Never init EEE in case of a switch is attached */
- if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
- goto out;
-
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
int tx_lpi_timer = priv->tx_lpi_timer;
@@ -772,10 +767,16 @@ static void stmmac_adjust_link(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /* At this stage, it could be needed to setup the EEE or adjust some
- * MAC related HW registers.
- */
- priv->eee_enabled = stmmac_eee_init(priv);
+ if (phydev->is_pseudo_fixed_link)
+ /* Stop the PHY layer from calling the link adjustment hook in
+ * case a switch is attached to the stmmac driver.
+ */
+ phydev->irq = PHY_IGNORE_INTERRUPT;
+ else
+ /* At this stage, init the EEE if supported.
+ * Never called in case of fixed_link.
+ */
+ priv->eee_enabled = stmmac_eee_init(priv);
}
/**
@@ -827,12 +828,8 @@ static int stmmac_init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, priv->plat->phy_node,
&stmmac_adjust_link, 0, interface);
} else {
- if (priv->plat->phy_bus_name)
- snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
- priv->plat->phy_bus_name, priv->plat->bus_id);
- else
- snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
- priv->plat->bus_id);
+ snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+ priv->plat->bus_id);
snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
@@ -870,11 +867,6 @@ static int stmmac_init_phy(struct net_device *dev)
return -ENODEV;
}
- /* If attached to a switch, there is no reason to poll phy handler */
- if (priv->plat->phy_bus_name)
- if (!strcmp(priv->plat->phy_bus_name, "fixed"))
- phydev->irq = PHY_IGNORE_INTERRUPT;
-
pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
" Link = %d\n", dev->name, phydev->phy_id, phydev->link);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ea76129dafc2..06704ca6f9ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -198,20 +198,12 @@ int stmmac_mdio_register(struct net_device *ndev)
struct mii_bus *new_bus;
struct stmmac_priv *priv = netdev_priv(ndev);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
- int addr, found;
struct device_node *mdio_node = priv->plat->mdio_node;
+ int addr, found;
if (!mdio_bus_data)
return 0;
- if (IS_ENABLED(CONFIG_OF)) {
- if (mdio_node) {
- netdev_dbg(ndev, "FOUND MDIO subnode\n");
- } else {
- netdev_warn(ndev, "No MDIO subnode found\n");
- }
- }
-
new_bus = mdiobus_alloc();
if (new_bus == NULL)
return -ENOMEM;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index dcbd2a1601e8..cf37ea558ecc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -132,6 +132,69 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
}
/**
+ * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
+ * @plat: driver data platform structure
+ * @np: device tree node
+ * @dev: device pointer
+ * Description:
+ * The mdio bus will be allocated in case a phy transceiver is on board;
+ * it will be NULL if the fixed-link is configured.
+ * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
+ * in any case (for DSA, mdio must be registered even if fixed-link).
+ * The table below sums the supported configurations:
+ * -------------------------------
+ * snps,phy-addr | Y
+ * -------------------------------
+ * phy-handle | Y
+ * -------------------------------
+ * fixed-link | N
+ * -------------------------------
+ * snps,dwmac-mdio |
+ * even if | Y
+ * fixed-link |
+ * -------------------------------
+ *
+ * It returns 0 in case of success, otherwise -ENODEV.
+ */
+static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
+ struct device_node *np, struct device *dev)
+{
+ bool mdio = true;
+
+ /* If phy-handle property is passed from DT, use it as the PHY */
+ plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (plat->phy_node)
+ dev_dbg(dev, "Found phy-handle subnode\n");
+
+ /* If phy-handle is not specified, check if we have a fixed-phy */
+ if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+ if ((of_phy_register_fixed_link(np) < 0))
+ return -ENODEV;
+
+ dev_dbg(dev, "Found fixed-link subnode\n");
+ plat->phy_node = of_node_get(np);
+ mdio = false;
+ }
+
+ /* If snps,dwmac-mdio is passed from DT, always register the MDIO */
+ for_each_child_of_node(np, plat->mdio_node) {
+ if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio"))
+ break;
+ }
+
+ if (plat->mdio_node) {
+ dev_dbg(dev, "Found MDIO subnode\n");
+ mdio = true;
+ }
+
+ if (mdio)
+ plat->mdio_bus_data =
+ devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
+ GFP_KERNEL);
+ return 0;
+}
+
+/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
* @plat: driver data platform structure
@@ -146,7 +209,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
- struct device_node *child_node = NULL;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -166,36 +228,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
/* Default to phy auto-detection */
plat->phy_addr = -1;
- /* If we find a phy-handle property, use it as the PHY */
- plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
-
- /* If phy-handle is not specified, check if we have a fixed-phy */
- if (!plat->phy_node && of_phy_is_fixed_link(np)) {
- if ((of_phy_register_fixed_link(np) < 0))
- return ERR_PTR(-ENODEV);
-
- plat->phy_node = of_node_get(np);
- }
-
- for_each_child_of_node(np, child_node)
- if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
- plat->mdio_node = child_node;
- break;
- }
-
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
*/
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node)
- plat->mdio_bus_data = NULL;
- else
- plat->mdio_bus_data =
- devm_kzalloc(&pdev->dev,
- sizeof(struct stmmac_mdio_bus_data),
- GFP_KERNEL);
+ /* Configure the PHY using all supported device-tree properties */
+ if (stmmac_dt_phy(plat, np, &pdev->dev))
+ return ERR_PTR(-ENODEV);
of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b881a7b1e4f6..9636da0b6efc 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -339,6 +339,8 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
+ BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
+ BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"),
@@ -348,6 +350,8 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7250, 0xfffffff0, },
{ PHY_ID_BCM7364, 0xfffffff0, },
{ PHY_ID_BCM7366, 0xfffffff0, },
+ { PHY_ID_BCM7346, 0xfffffff0, },
+ { PHY_ID_BCM7362, 0xfffffff0, },
{ PHY_ID_BCM7425, 0xfffffff0, },
{ PHY_ID_BCM7429, 0xfffffff0, },
{ PHY_ID_BCM7439, 0xfffffff0, },
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 26c64d2782fa..a0f64cba86ba 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1198,6 +1198,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_dev_open;
}
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+
err = vlan_vids_add_by_dev(port_dev, dev);
if (err) {
netdev_err(dev, "Failed to add vlan ids to device %s\n",
@@ -1261,6 +1264,8 @@ err_enable_netpoll:
vlan_vids_del_by_dev(port_dev, dev);
err_vids_add:
+ dev_uc_unsync(port_dev, dev);
+ dev_mc_unsync(port_dev, dev);
dev_close(port_dev);
err_dev_open:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index afdf950617c3..2c9e45f50edb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -622,7 +622,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
/* Re-attach the filter to persist device */
if (!skip_filter && (tun->filter_attached == true)) {
- err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+ lockdep_rtnl_is_held());
if (!err)
goto out;
}
@@ -1014,7 +1015,6 @@ static void tun_net_init(struct net_device *dev)
/* Zero header length */
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
- dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
case IFF_TAP:
@@ -1026,7 +1026,6 @@ static void tun_net_init(struct net_device *dev)
eth_hw_addr_random(dev);
- dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
}
}
@@ -1480,6 +1479,8 @@ static void tun_setup(struct net_device *dev)
dev->ethtool_ops = &tun_ethtool_ops;
dev->destructor = tun_free_netdev;
+ /* We prefer our own queue length */
+ dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
@@ -1822,7 +1823,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- sk_detach_filter(tfile->socket.sk);
+ __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
}
tun->filter_attached = false;
@@ -1835,7 +1836,8 @@ static int tun_attach_filter(struct tun_struct *tun)
for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+ lockdep_rtnl_is_held());
if (ret) {
tun_detach_filter(tun, i);
return ret;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 86ba30ba35e8..2fb31edab125 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1626,6 +1626,13 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},
+ /* Telit LE910 V2 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x0036,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_noarp_info,
+ },
+
/* DW5812 LTE Verizon Mobile Broadband Card
* Unlike DW5550 this device requires FLAG_NOARP
*/
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 1bfe0fcaccf5..22e1a9a99a7d 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -38,7 +38,7 @@
* HEADS UP: this handshaking isn't all that robust. This driver
* gets confused easily if you unplug one end of the cable then
* try to connect it again; you'll need to restart both ends. The
- * "naplink" software (used by some PlayStation/2 deveopers) does
+ * "naplink" software (used by some PlayStation/2 developers) does
* the handshaking much better! Also, sometimes this hardware
* seems to get wedged under load. Prolific docs are weak, and
* don't identify differences between PL2301 and PL2302, much less
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 7d717c66bcb0..9d1fce8a6e84 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index c32cbb593600..f068b6513cd2 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1204,7 +1204,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
{
struct btt *btt = bdev->bd_disk->private_data;
- btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
page_endio(page, rw & WRITE, 0);
return 0;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index fc82743aefb6..19f822d7f652 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -407,7 +407,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
[ND_CMD_IMPLEMENTED] = { },
[ND_CMD_SMART] = {
.out_num = 2,
- .out_sizes = { 4, 8, },
+ .out_sizes = { 4, 128, },
},
[ND_CMD_SMART_THRESHOLD] = {
.out_num = 2,
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 79646d0c3277..182a93fe3712 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -417,8 +417,8 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
set_badblock(bb, start_sector, num_sectors);
}
-static void namespace_add_poison(struct list_head *poison_list,
- struct badblocks *bb, struct resource *res)
+static void badblocks_populate(struct list_head *poison_list,
+ struct badblocks *bb, const struct resource *res)
{
struct nd_poison *pl;
@@ -460,36 +460,35 @@ static void namespace_add_poison(struct list_head *poison_list,
}
/**
- * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks
- * @ndns: the namespace containing poison ranges
- * @bb: badblocks instance to populate
- * @offset: offset at the start of the namespace before 'sector 0'
+ * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
+ * @region: parent region of the range to interrogate
+ * @bb: badblocks instance to populate
+ * @res: resource range to consider
*
- * The poison list generated during NFIT initialization may contain multiple,
- * possibly overlapping ranges in the SPA (System Physical Address) space.
- * Compare each of these ranges to the namespace currently being initialized,
- * and add badblocks to the gendisk for all matching sub-ranges
+ * The poison list generated during bus initialization may contain
+ * multiple, possibly overlapping physical address ranges. Compare each
+ * of these ranges to the resource range currently being initialized,
+ * and add badblocks entries for all matching sub-ranges
*/
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
- struct badblocks *bb, resource_size_t offset)
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+ struct badblocks *bb, const struct resource *res)
{
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
struct nvdimm_bus *nvdimm_bus;
struct list_head *poison_list;
- struct resource res = {
- .start = nsio->res.start + offset,
- .end = nsio->res.end,
- };
- nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent);
+ if (!is_nd_pmem(&nd_region->dev)) {
+ dev_WARN_ONCE(&nd_region->dev, 1,
+ "%s only valid for pmem regions\n", __func__);
+ return;
+ }
+ nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
poison_list = &nvdimm_bus->poison_list;
nvdimm_bus_lock(&nvdimm_bus->dev);
- namespace_add_poison(poison_list, bb, &res);
+ badblocks_populate(poison_list, bb, res);
nvdimm_bus_unlock(&nvdimm_bus->dev);
}
-EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison);
+EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 1799bd97a9ce..875c524fafb0 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -266,8 +266,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name);
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
- struct badblocks *bb, resource_size_t offset);
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+ struct badblocks *bb, const struct resource *res);
int nd_blk_region_init(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 254d3bc13f70..e071e214feba 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -376,7 +376,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
} else {
/* from init we validate */
if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
- return -EINVAL;
+ return -ENODEV;
}
if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ca5721c306bb..f798899338ed 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -99,10 +99,24 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (unlikely(bad_pmem))
rc = -EIO;
else {
- memcpy_from_pmem(mem + off, pmem_addr, len);
+ rc = memcpy_from_pmem(mem + off, pmem_addr, len);
flush_dcache_page(page);
}
} else {
+ /*
+ * Note that we write the data both before and after
+ * clearing poison. The write before clear poison
+ * handles situations where the latest written data is
+ * preserved and the clear poison operation simply marks
+ * the address range as valid without changing the data.
+ * In this case application software can assume that an
+ * interrupted write will either return the new good
+ * data or an error.
+ *
+ * However, if pmem_clear_poison() leaves the data in an
+ * indeterminate state we need to perform the write
+ * after clear poison.
+ */
flush_dcache_page(page);
memcpy_to_pmem(pmem_addr, mem + off, len);
if (unlikely(bad_pmem)) {
@@ -151,7 +165,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
struct pmem_device *pmem = bdev->bd_disk->private_data;
int rc;
- rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
if (rw & WRITE)
wmb_pmem();
@@ -244,7 +258,9 @@ static void pmem_detach_disk(struct pmem_device *pmem)
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
int nid = dev_to_node(dev);
+ struct resource bb_res;
struct gendisk *disk;
blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
@@ -271,8 +287,17 @@ static int pmem_attach_disk(struct device *dev,
devm_exit_badblocks(dev, &pmem->bb);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
- nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
-
+ bb_res.start = nsio->res.start + pmem->data_offset;
+ bb_res.end = nsio->res.end;
+ if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+ bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
+ bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+ }
+ nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
+ &bb_res);
disk->bb = &pmem->bb;
add_disk(disk);
revalidate_disk(disk);
@@ -295,7 +320,7 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns,
if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
return -EIO;
- memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
+ return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
} else {
memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
wmb_pmem();
@@ -553,7 +578,7 @@ static int nd_pmem_probe(struct device *dev)
ndns->rw_bytes = pmem_rw_bytes;
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
- nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
+ nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
if (is_nd_btt(dev)) {
/* btt allocates its own request_queue */
@@ -595,14 +620,25 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
struct pmem_device *pmem = dev_get_drvdata(dev);
struct nd_namespace_common *ndns = pmem->ndns;
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ struct resource res = {
+ .start = nsio->res.start + pmem->data_offset,
+ .end = nsio->res.end,
+ };
if (event != NVDIMM_REVALIDATE_POISON)
return;
- if (is_nd_btt(dev))
- nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
- else
- nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
+ if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+ res.start += __le32_to_cpu(pfn_sb->start_pad);
+ res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+ }
+
+ nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}
MODULE_ALIAS("pmem");
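Both pmem_attach_disk() and nd_pmem_notify() above now clip the badblocks range to the usable span of the namespace: the start is advanced by the data offset (plus the pfn superblock's start_pad when a pfn device is stacked on top), and the end is pulled back by end_trunc. A tiny sketch of that clipping, using made-up offsets rather than real namespace geometry:

#include <stdio.h>

struct range {
	unsigned long long start;
	unsigned long long end;
};

int main(void)
{
	/* hypothetical namespace span and pfn superblock fields */
	struct range res = { 0x100000000ULL, 0x17fffffffULL };
	unsigned long long data_offset = 0x200000;
	unsigned long long start_pad = 0x1000, end_trunc = 0x2000;
	struct range bb = res;

	bb.start += data_offset + start_pad; /* skip metadata in front */
	bb.end -= end_trunc;                 /* drop the truncated tail */
	printf("badblocks range: [%#llx, %#llx]\n", bb.start, bb.end);
	return 0;
}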
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 24ccda303efb..4fd733ff72b1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1478,8 +1478,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result > 0) {
dev_err(dev->ctrl.device,
"Could not set queue count (%d)\n", result);
- nr_io_queues = 0;
- result = 0;
+ return 0;
}
if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
@@ -1513,7 +1512,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* If we enable msix early due to not intx, disable it again before
* setting up the full range we need.
*/
- if (!pdev->irq)
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ else if (pdev->msix_enabled)
pci_disable_msix(pdev);
for (i = 0; i < nr_io_queues; i++)
@@ -1696,7 +1697,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
if (pci_enable_device_mem(pdev))
return result;
- dev->entry[0].vector = pdev->irq;
pci_set_master(pdev);
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
@@ -1709,13 +1709,18 @@ static int nvme_pci_enable(struct nvme_dev *dev)
}
/*
- * Some devices don't advertse INTx interrupts, pre-enable a single
- * MSIX vec for setup. We'll adjust this later.
+ * Some devices and/or platforms don't advertise or work with INTx
+ * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+ * adjust this later.
*/
- if (!pdev->irq) {
- result = pci_enable_msix(pdev, dev->entry, 1);
- if (result < 0)
- goto disable;
+ if (pci_enable_msix(pdev, dev->entry, 1)) {
+ pci_enable_msi(pdev);
+ dev->entry[0].vector = pdev->irq;
+ }
+
+ if (!dev->entry[0].vector) {
+ result = -ENODEV;
+ goto disable;
}
cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1859,6 +1864,9 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
+ if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+ goto out;
+
set_bit(NVME_CTRL_RESETTING, &dev->flags);
result = nvme_pci_enable(dev);
@@ -2078,11 +2086,10 @@ static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- del_timer_sync(&dev->watchdog_timer);
-
set_bit(NVME_CTRL_REMOVING, &dev->flags);
pci_set_drvdata(pdev, NULL);
flush_work(&dev->async_work);
+ flush_work(&dev->reset_work);
flush_work(&dev->scan_work);
nvme_remove_namespaces(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl);
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b48ac6300c79..a0e5260bd006 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -239,8 +239,8 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *root_inode;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = OPROFILEFS_MAGIC;
sb->s_op = &s_ops;
sb->s_time_gran = 1;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index e982010f0ed1..342b6918bbde 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -636,7 +636,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
u8 *data = (u8 *) buf;
/* Several chips lock up trying to read undefined config space */
- if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
+ if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
size = dev->cfg_size;
else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
size = 128;
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 4c2fa05b4589..944674ee3464 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
int stschg_irq; /* card-status-change irq */
int card_irq; /* card irq */
int eject_irq; /* db1200/pb1200 have these */
+ int insert_gpio; /* db1000 carddetect gpio */
#define BOARD_TYPE_DEFAULT 0 /* most boards */
#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
/* carddetect gpio: low-active */
static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
{
- return !gpio_get_value(irq_to_gpio(sock->insert_irq));
+ return !gpio_get_value(sock->insert_gpio);
}
static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
sock->card_irq = r ? r->start : 0;
- /* insert: irq which triggers on card insertion/ejection */
+ /* insert: irq which triggers on card insertion/ejection
+ * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
+ */
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
sock->insert_irq = r ? r->start : -1;
+ if (sock->board_type == BOARD_TYPE_DEFAULT) {
+ sock->insert_gpio = r ? r->start : -1;
+ sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
+ }
/* stschg: irq which trigger on card status change (optional) */
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
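As the "BIG FAT NOTE" above says, the DB1000-class boards hand over a GPIO number in the "insert" IRQ resource, so the probe now keeps the GPIO and derives the interrupt with gpio_to_irq(). A minimal sketch of that split, with illustrative function and parameter names:

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>

/* Sketch: resolve a card-detect line handed over as an IRQ resource. */
static int resolve_carddetect(struct platform_device *pdev,
			      int *detect_gpio, int *detect_irq)
{
	struct resource *r;

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
	if (!r)
		return -ENODEV;

	/* The platform actually passes a GPIO; map it to its interrupt. */
	*detect_gpio = r->start;
	*detect_irq = gpio_to_irq(r->start);

	return *detect_irq < 0 ? *detect_irq : 0;
}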
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 46210512d8ec..9cfa544072b5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -762,19 +762,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
if (of_property_read_bool(dev_np, "fsl,input-sel")) {
np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
- if (np) {
- ipctl->input_sel_base = of_iomap(np, 0);
- if (IS_ERR(ipctl->input_sel_base)) {
- of_node_put(np);
- dev_err(&pdev->dev,
- "iomuxc input select base address not found\n");
- return PTR_ERR(ipctl->input_sel_base);
- }
- } else {
+ if (!np) {
dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
return -EINVAL;
}
+
+ ipctl->input_sel_base = of_iomap(np, 0);
of_node_put(np);
+ if (!ipctl->input_sel_base) {
+ dev_err(&pdev->dev,
+ "iomuxc input select base address not found\n");
+ return -ENOMEM;
+ }
}
imx_pinctrl_desc.name = dev_name(&pdev->dev);
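The imx rework above hinges on the fact that of_iomap() returns NULL on failure, never an ERR_PTR value, so the old IS_ERR() check could not fire. A self-contained sketch of the corrected lookup pattern; the helper name and messages are illustrative only:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Sketch: map a register block referenced by a phandle property. */
static int map_phandle_regs(struct device *dev, struct device_node *np,
			    const char *prop, void __iomem **base)
{
	struct device_node *target;

	target = of_parse_phandle(np, prop, 0);
	if (!target) {
		dev_err(dev, "%s property not found\n", prop);
		return -EINVAL;
	}

	*base = of_iomap(target, 0);
	of_node_put(target);		/* done with the node either way */
	if (!*base)			/* of_iomap() returns NULL, not ERR_PTR */
		return -ENOMEM;

	return 0;
}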
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 85536b467c25..6c2c816f8e5f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -665,6 +665,35 @@ static void intel_gpio_irq_ack(struct irq_data *d)
spin_unlock(&pctrl->lock);
}
+static void intel_gpio_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct intel_community *community;
+ unsigned pin = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ community = intel_get_community(pctrl, pin);
+ if (community) {
+ unsigned padno = pin_to_padno(community, pin);
+ unsigned gpp_size = community->gpp_size;
+ unsigned gpp_offset = padno % gpp_size;
+ unsigned gpp = padno / gpp_size;
+ u32 value;
+
+ /* Clear interrupt status first to avoid unexpected interrupt */
+ writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+
+ value = readl(community->regs + community->ie_offset + gpp * 4);
+ value |= BIT(gpp_offset);
+ writel(value, community->regs + community->ie_offset + gpp * 4);
+ }
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -741,8 +770,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
value |= PADCFG0_RXINV;
} else if (type & IRQ_TYPE_EDGE_RISING) {
value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
- } else if (type & IRQ_TYPE_LEVEL_LOW) {
- value |= PADCFG0_RXINV;
+ } else if (type & IRQ_TYPE_LEVEL_MASK) {
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value |= PADCFG0_RXINV;
} else {
value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
}
@@ -852,6 +882,7 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
static struct irq_chip intel_gpio_irqchip = {
.name = "intel-gpio",
+ .irq_enable = intel_gpio_irq_enable,
.irq_ack = intel_gpio_irq_ack,
.irq_mask = intel_gpio_irq_mask,
.irq_unmask = intel_gpio_irq_unmask,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 352406108fa0..c8969dd49449 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
int val;
if (pull)
- pullidx = data_out ? 1 : 2;
+ pullidx = data_out ? 2 : 1;
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
gpio,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 856f736cb1a6..2673cd9d106e 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
"mfio83",
};
-static const char * const pistachio_sys_pll_lock_groups[] = {
+static const char * const pistachio_audio_pll_lock_groups[] = {
"mfio84",
};
-static const char * const pistachio_wifi_pll_lock_groups[] = {
+static const char * const pistachio_rpu_v_pll_lock_groups[] = {
"mfio85",
};
-static const char * const pistachio_bt_pll_lock_groups[] = {
+static const char * const pistachio_rpu_l_pll_lock_groups[] = {
"mfio86",
};
-static const char * const pistachio_rpu_v_pll_lock_groups[] = {
+static const char * const pistachio_sys_pll_lock_groups[] = {
"mfio87",
};
-static const char * const pistachio_rpu_l_pll_lock_groups[] = {
+static const char * const pistachio_wifi_pll_lock_groups[] = {
"mfio88",
};
-static const char * const pistachio_audio_pll_lock_groups[] = {
+static const char * const pistachio_bt_pll_lock_groups[] = {
"mfio89",
};
@@ -559,12 +559,12 @@ enum pistachio_mux_option {
PISTACHIO_FUNCTION_DREQ4,
PISTACHIO_FUNCTION_DREQ5,
PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
+ PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
+ PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
+ PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
PISTACHIO_FUNCTION_SYS_PLL_LOCK,
PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
PISTACHIO_FUNCTION_BT_PLL_LOCK,
- PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
- PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
- PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
FUNCTION(dreq4),
FUNCTION(dreq5),
FUNCTION(mips_pll_lock),
+ FUNCTION(audio_pll_lock),
+ FUNCTION(rpu_v_pll_lock),
+ FUNCTION(rpu_l_pll_lock),
FUNCTION(sys_pll_lock),
FUNCTION(wifi_pll_lock),
FUNCTION(bt_pll_lock),
- FUNCTION(rpu_v_pll_lock),
- FUNCTION(rpu_l_pll_lock),
- FUNCTION(audio_pll_lock),
FUNCTION(debug_raw_cca_ind),
FUNCTION(debug_ed_sec20_cca_ind),
FUNCTION(debug_ed_sec40_cca_ind),
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 412c6b78140a..a13f2b6f6fc0 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1573,6 +1573,22 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
return 0;
}
+/*
+ * gpiolib gpiod_to_irq callback function.
+ * Returns the mapped IRQ (external interrupt) number for a given GPIO pin.
+ */
+static int xway_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct ltq_pinmux_info *info = dev_get_drvdata(chip->parent);
+ int i;
+
+ for (i = 0; i < info->num_exin; i++)
+ if (info->exin[i] == offset)
+ return ltq_eiu_get_irq(i);
+
+ return -1;
+}
+
static struct gpio_chip xway_chip = {
.label = "gpio-xway",
.direction_input = xway_gpio_dir_in,
@@ -1581,6 +1597,7 @@ static struct gpio_chip xway_chip = {
.set = xway_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
+ .to_irq = xway_gpio_to_irq,
.base = -1,
};
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b5d81ced6ce6..b68ae424cee2 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -237,7 +237,7 @@ DECLARE_QCA_GPIO_PINS(99);
.pins = gpio##id##_pins, \
.npins = (unsigned)ARRAY_SIZE(gpio##id##_pins), \
.funcs = (int[]){ \
- qca_mux_NA, /* gpio mode */ \
+ qca_mux_gpio, /* gpio mode */ \
qca_mux_##f1, \
qca_mux_##f2, \
qca_mux_##f3, \
@@ -254,11 +254,11 @@ DECLARE_QCA_GPIO_PINS(99);
qca_mux_##f14 \
}, \
.nfuncs = 15, \
- .ctl_reg = 0x1000 + 0x10 * id, \
- .io_reg = 0x1004 + 0x10 * id, \
- .intr_cfg_reg = 0x1008 + 0x10 * id, \
- .intr_status_reg = 0x100c + 0x10 * id, \
- .intr_target_reg = 0x400 + 0x4 * id, \
+ .ctl_reg = 0x0 + 0x1000 * id, \
+ .io_reg = 0x4 + 0x1000 * id, \
+ .intr_cfg_reg = 0x8 + 0x1000 * id, \
+ .intr_status_reg = 0xc + 0x1000 * id, \
+ .intr_target_reg = 0x8 + 0x1000 * id, \
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
@@ -414,7 +414,7 @@ static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
.nfunctions = ARRAY_SIZE(ipq4019_functions),
.groups = ipq4019_groups,
.ngroups = ARRAY_SIZE(ipq4019_groups),
- .ngpios = 70,
+ .ngpios = 100,
};
static int ipq4019_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index dc3609f0c60b..ee0c1f2567d9 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -546,7 +546,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
return ret;
}
- pinctrl_provide_dummies();
+ /* Enable dummy states for those platforms without pinctrl support */
+ if (!of_have_populated_dt())
+ pinctrl_provide_dummies();
ret = sh_pfc_init_ranges(pfc);
if (ret < 0)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 00265f0435a7..8b381d69df86 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
.pins = sun8i_a33_pins,
.npins = ARRAY_SIZE(sun8i_a33_pins),
.irq_banks = 2,
+ .irq_bank_base = 1,
};
static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 12a1dfabb1af..3b017dbd289c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -579,7 +579,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_cfg_reg(d->hwirq);
+ u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
unsigned long flags;
u32 regval;
@@ -626,7 +626,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
static void sunxi_pinctrl_irq_ack(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 status_reg = sunxi_irq_status_reg(d->hwirq);
+ u32 status_reg = sunxi_irq_status_reg(d->hwirq,
+ pctl->desc->irq_bank_base);
u8 status_idx = sunxi_irq_status_offset(d->hwirq);
/* Clear the IRQ */
@@ -636,7 +637,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
static void sunxi_pinctrl_irq_mask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+ u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
@@ -653,7 +654,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+ u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
@@ -745,7 +746,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
if (bank == pctl->desc->irq_banks)
return;
- reg = sunxi_irq_status_reg_from_bank(bank);
+ reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
val = readl(pctl->membase + reg);
if (val) {
@@ -1024,9 +1025,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
for (i = 0; i < pctl->desc->irq_banks; i++) {
/* Mask and clear all IRQs before registering a handler */
- writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
+ writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
+ pctl->desc->irq_bank_base));
writel(0xffffffff,
- pctl->membase + sunxi_irq_status_reg_from_bank(i));
+ pctl->membase + sunxi_irq_status_reg_from_bank(i,
+ pctl->desc->irq_bank_base));
irq_set_chained_handler_and_data(pctl->irq[i],
sunxi_pinctrl_irq_handler,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e248e81a0f9e..0afce1ab12d0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
int npins;
unsigned pin_base;
unsigned irq_banks;
+ unsigned irq_bank_base;
bool irq_read_needs_mux;
};
@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
return pin_num * PULL_PINS_BITS;
}
-static inline u32 sunxi_irq_cfg_reg(u16 irq)
+static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
- return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
+ return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
}
static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
return irq_num * IRQ_CFG_IRQ_BITS;
}
-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
{
- return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
+ return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
}
-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
+static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
- return sunxi_irq_ctrl_reg_from_bank(bank);
+ return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
}
static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
return irq_num * IRQ_CTRL_IRQ_BITS;
}
-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
{
- return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
+ return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
}
-static inline u32 sunxi_irq_status_reg(u16 irq)
+static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
- return sunxi_irq_status_reg_from_bank(bank);
+ return sunxi_irq_status_reg_from_bank(bank, bank_base);
}
static inline u32 sunxi_irq_status_offset(u16 irq)
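With irq_bank_base in place, every config/control/status register is looked up at IRQ_*_REG + (bank_base + bank) * IRQ_MEM_SIZE, so a part like the A33, whose first usable interrupt bank is physically the controller's second bank (irq_bank_base = 1), lands on the right offsets. A tiny worked example of the arithmetic; the base, stride and per-register count below are assumed for illustration, not copied from the header:

#include <stdio.h>

#define IRQ_PER_BANK	32
#define IRQ_CFG_REG	0x200	/* assumed register base */
#define IRQ_MEM_SIZE	0x20	/* assumed per-bank stride */

static unsigned irq_cfg_reg(unsigned irq, unsigned bank_base)
{
	unsigned bank = irq / IRQ_PER_BANK;
	unsigned reg = (irq % IRQ_PER_BANK) / 8 * 0x04;	/* 8 irqs per reg */

	return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
}

int main(void)
{
	/* A33-style controller: logical bank 0 is physical bank 1. */
	printf("hwirq 0, bank_base 0 -> %#x\n", irq_cfg_reg(0, 0));
	printf("hwirq 0, bank_base 1 -> %#x\n", irq_cfg_reg(0, 1));
	return 0;
}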
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 9973cebb4d6f..07462d79d040 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,8 +309,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
* much memory to the process.
*/
down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, address, 1,
- !is_write, 0, &page, NULL);
+ ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
up_read(&current->mm->mmap_sem);
if (ret < 0)
break;
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index cdfd01f0adb8..8fad0a7044d3 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1091,6 +1091,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
+ RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 7225ac6b3df5..fad968eb75f6 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -392,7 +392,7 @@ static const struct regmap_config fsl_pwm_regmap_config = {
.max_register = FTM_PWMLOAD,
.volatile_reg = fsl_pwm_volatile_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
};
static int fsl_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9607bc826460..5d4d91846357 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -886,7 +886,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
}
down_read(&current->mm->mmap_sem);
- pinned = get_user_pages(current, current->mm,
+ pinned = get_user_pages(
(unsigned long)xfer->loc_addr & PAGE_MASK,
nr_pages, dir == DMA_FROM_DEVICE, 0,
page_list, NULL);
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index 6bb04d453247..6f056caa8a56 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -189,9 +189,9 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
}
ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
- if (!ddata->boot_base) {
+ if (IS_ERR(ddata->boot_base)) {
dev_err(dev, "Boot base not found\n");
- return -EINVAL;
+ return PTR_ERR(ddata->boot_base);
}
err = of_property_read_u32_index(np, "st,syscfg", 1,
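The st_remoteproc fix above works because syscon_regmap_lookup_by_phandle() reports failure with an ERR_PTR-encoded pointer, never NULL, so the result must be filtered with IS_ERR()/PTR_ERR(). A minimal sketch of that idiom; the helper name and out-parameter are placeholders:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Sketch: fetch a syscon regmap referenced from a driver's DT node. */
static int get_boot_syscon(struct device *dev, struct device_node *np,
			   struct regmap **map)
{
	*map = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
	if (IS_ERR(*map)) {
		dev_err(dev, "Boot base not found\n");
		return PTR_ERR(*map);	/* never NULL, always ERR_PTR */
	}

	return 0;
}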
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 17ad5749e91d..1e560188dd13 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -317,17 +317,17 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
struct alias_pav_group *group;
struct dasd_uid uid;
+ spin_lock(get_ccwdev_lock(device->cdev));
private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
private->uid.base_unit_addr =
lcu->uac->unit[private->uid.real_unit_addr].base_ua;
uid = private->uid;
-
+ spin_unlock(get_ccwdev_lock(device->cdev));
/* if we have no PAV anyway, we don't need to bother with PAV groups */
if (lcu->pav == NO_PAV) {
list_move(&device->alias_list, &lcu->active_devices);
return 0;
}
-
group = _find_group(lcu, &uid);
if (!group) {
group = kzalloc(sizeof(*group), GFP_ATOMIC);
@@ -397,130 +397,6 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
return 0;
}
-/*
- * This function tries to lock all devices on an lcu via trylock
- * return NULL on success otherwise return first failed device
- */
-static struct dasd_device *_trylock_all_devices_on_lcu(struct alias_lcu *lcu,
- struct dasd_device *pos)
-
-{
- struct alias_pav_group *pavgroup;
- struct dasd_device *device;
-
- list_for_each_entry(device, &lcu->active_devices, alias_list) {
- if (device == pos)
- continue;
- if (!spin_trylock(get_ccwdev_lock(device->cdev)))
- return device;
- }
- list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
- if (device == pos)
- continue;
- if (!spin_trylock(get_ccwdev_lock(device->cdev)))
- return device;
- }
- list_for_each_entry(pavgroup, &lcu->grouplist, group) {
- list_for_each_entry(device, &pavgroup->baselist, alias_list) {
- if (device == pos)
- continue;
- if (!spin_trylock(get_ccwdev_lock(device->cdev)))
- return device;
- }
- list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
- if (device == pos)
- continue;
- if (!spin_trylock(get_ccwdev_lock(device->cdev)))
- return device;
- }
- }
- return NULL;
-}
-
-/*
- * unlock all devices except the one that is specified as pos
- * stop if enddev is specified and reached
- */
-static void _unlock_all_devices_on_lcu(struct alias_lcu *lcu,
- struct dasd_device *pos,
- struct dasd_device *enddev)
-
-{
- struct alias_pav_group *pavgroup;
- struct dasd_device *device;
-
- list_for_each_entry(device, &lcu->active_devices, alias_list) {
- if (device == pos)
- continue;
- if (device == enddev)
- return;
- spin_unlock(get_ccwdev_lock(device->cdev));
- }
- list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
- if (device == pos)
- continue;
- if (device == enddev)
- return;
- spin_unlock(get_ccwdev_lock(device->cdev));
- }
- list_for_each_entry(pavgroup, &lcu->grouplist, group) {
- list_for_each_entry(device, &pavgroup->baselist, alias_list) {
- if (device == pos)
- continue;
- if (device == enddev)
- return;
- spin_unlock(get_ccwdev_lock(device->cdev));
- }
- list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
- if (device == pos)
- continue;
- if (device == enddev)
- return;
- spin_unlock(get_ccwdev_lock(device->cdev));
- }
- }
-}
-
-/*
- * this function is needed because the locking order
- * device lock -> lcu lock
- * needs to be assured when iterating over devices in an LCU
- *
- * if a device is specified in pos then the device lock is already hold
- */
-static void _trylock_and_lock_lcu_irqsave(struct alias_lcu *lcu,
- struct dasd_device *pos,
- unsigned long *flags)
-{
- struct dasd_device *failed;
-
- do {
- spin_lock_irqsave(&lcu->lock, *flags);
- failed = _trylock_all_devices_on_lcu(lcu, pos);
- if (failed) {
- _unlock_all_devices_on_lcu(lcu, pos, failed);
- spin_unlock_irqrestore(&lcu->lock, *flags);
- cpu_relax();
- }
- } while (failed);
-}
-
-static void _trylock_and_lock_lcu(struct alias_lcu *lcu,
- struct dasd_device *pos)
-{
- struct dasd_device *failed;
-
- do {
- spin_lock(&lcu->lock);
- failed = _trylock_all_devices_on_lcu(lcu, pos);
- if (failed) {
- _unlock_all_devices_on_lcu(lcu, pos, failed);
- spin_unlock(&lcu->lock);
- cpu_relax();
- }
- } while (failed);
-}
-
static int read_unit_address_configuration(struct dasd_device *device,
struct alias_lcu *lcu)
{
@@ -615,7 +491,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
if (rc)
return rc;
- _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
+ spin_lock_irqsave(&lcu->lock, flags);
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
@@ -634,7 +510,6 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
alias_list) {
_add_device_to_lcu(lcu, device, refdev);
}
- _unlock_all_devices_on_lcu(lcu, NULL, NULL);
spin_unlock_irqrestore(&lcu->lock, flags);
return 0;
}
@@ -722,8 +597,7 @@ int dasd_alias_add_device(struct dasd_device *device)
lcu = private->lcu;
rc = 0;
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- spin_lock(&lcu->lock);
+ spin_lock_irqsave(&lcu->lock, flags);
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device, device);
if (rc)
@@ -733,8 +607,7 @@ int dasd_alias_add_device(struct dasd_device *device)
list_move(&device->alias_list, &lcu->active_devices);
_schedule_lcu_update(lcu, device);
}
- spin_unlock(&lcu->lock);
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ spin_unlock_irqrestore(&lcu->lock, flags);
return rc;
}
@@ -933,15 +806,27 @@ static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
struct alias_pav_group *pavgroup;
struct dasd_device *device;
- list_for_each_entry(device, &lcu->active_devices, alias_list)
+ list_for_each_entry(device, &lcu->active_devices, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
- list_for_each_entry(device, &lcu->inactive_devices, alias_list)
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
- list_for_each_entry(device, &pavgroup->baselist, alias_list)
+ list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
- list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
}
}
@@ -950,15 +835,27 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
struct alias_pav_group *pavgroup;
struct dasd_device *device;
- list_for_each_entry(device, &lcu->active_devices, alias_list)
+ list_for_each_entry(device, &lcu->active_devices, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
- list_for_each_entry(device, &lcu->inactive_devices, alias_list)
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
- list_for_each_entry(device, &pavgroup->baselist, alias_list)
+ list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
- list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
}
}
@@ -984,48 +881,32 @@ static void summary_unit_check_handling_work(struct work_struct *work)
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
reset_summary_unit_check(lcu, device, suc_data->reason);
- _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
+ spin_lock_irqsave(&lcu->lock, flags);
_unstop_all_devices_on_lcu(lcu);
_restart_all_base_devices_on_lcu(lcu);
/* 3. read new alias configuration */
_schedule_lcu_update(lcu, device);
lcu->suc_data.device = NULL;
dasd_put_device(device);
- _unlock_all_devices_on_lcu(lcu, NULL, NULL);
spin_unlock_irqrestore(&lcu->lock, flags);
}
-/*
- * note: this will be called from int handler context (cdev locked)
- */
-void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
- struct irb *irb)
+void dasd_alias_handle_summary_unit_check(struct work_struct *work)
{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ suc_work);
struct dasd_eckd_private *private = device->private;
struct alias_lcu *lcu;
- char reason;
- char *sense;
-
- sense = dasd_get_sense(irb);
- if (sense) {
- reason = sense[8];
- DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
- "eckd handle summary unit check: reason", reason);
- } else {
- DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "eckd handle summary unit check:"
- " no reason code available");
- return;
- }
+ unsigned long flags;
lcu = private->lcu;
if (!lcu) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device not ready to handle summary"
" unit check (no lcu structure)");
- return;
+ goto out;
}
- _trylock_and_lock_lcu(lcu, device);
+ spin_lock_irqsave(&lcu->lock, flags);
/* If this device is about to be removed just return and wait for
* the next interrupt on a different device
*/
@@ -1033,27 +914,26 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device is in offline processing,"
" don't do summary unit check handling");
- _unlock_all_devices_on_lcu(lcu, device, NULL);
- spin_unlock(&lcu->lock);
- return;
+ goto out_unlock;
}
if (lcu->suc_data.device) {
/* already scheduled or running */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"previous instance of summary unit check worker"
" still pending");
- _unlock_all_devices_on_lcu(lcu, device, NULL);
- spin_unlock(&lcu->lock);
- return ;
+ goto out_unlock;
}
_stop_all_devices_on_lcu(lcu);
/* prepare for lcu_update */
- private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
- lcu->suc_data.reason = reason;
+ lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
+ lcu->suc_data.reason = private->suc_reason;
lcu->suc_data.device = device;
dasd_get_device(device);
- _unlock_all_devices_on_lcu(lcu, device, NULL);
- spin_unlock(&lcu->lock);
if (!schedule_work(&lcu->suc_data.worker))
dasd_put_device(device);
+out_unlock:
+ spin_unlock_irqrestore(&lcu->lock, flags);
+out:
+ clear_bit(DASD_FLAG_SUC, &device->flags);
+ dasd_put_device(device);
};
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 75c032dcf173..c1b4ae55e129 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1682,6 +1682,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
/* setup work queue for validate server*/
INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
+ /* setup work queue for summary unit check */
+ INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
if (!ccw_device_is_pathgroup(device->cdev)) {
dev_warn(&device->cdev->dev,
@@ -2549,14 +2551,6 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
device->state == DASD_STATE_ONLINE &&
!test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
!test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
- /*
- * the state change could be caused by an alias
- * reassignment remove device from alias handling
- * to prevent new requests from being scheduled on
- * the wrong alias device
- */
- dasd_alias_remove_device(device);
-
/* schedule worker to reload device */
dasd_reload_device(device);
}
@@ -2571,7 +2565,27 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
/* summary unit check */
if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
(scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
- dasd_alias_handle_summary_unit_check(device, irb);
+ if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "eckd suc: device already notified");
+ return;
+ }
+ sense = dasd_get_sense(irb);
+ if (!sense) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "eckd suc: no reason code available");
+ clear_bit(DASD_FLAG_SUC, &device->flags);
+ return;
+
+ }
+ private->suc_reason = sense[8];
+ DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
+ "eckd handle summary unit check: reason",
+ private->suc_reason);
+ dasd_get_device(device);
+ if (!schedule_work(&device->suc_work))
+ dasd_put_device(device);
+
return;
}
@@ -4495,6 +4509,12 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
struct dasd_uid uid;
unsigned long flags;
+ /*
+ * remove device from alias handling to prevent new requests
+ * from being scheduled on the wrong alias device
+ */
+ dasd_alias_remove_device(device);
+
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
old_base = private->uid.base_unit_addr;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
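The summary-unit-check rework above follows a common kernel pattern: claim a "work pending" bit atomically in the interrupt path, take a reference for the worker, and drop it again if schedule_work() reports the item was already queued; the worker clears the bit and drops the reference when done. A stripped-down sketch of that pattern; the struct, flag and function names are placeholders, not the DASD ones:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#define MYDEV_FLAG_EVENT	0	/* placeholder flag bit */

struct mydev {
	unsigned long flags;
	struct kref ref;
	struct work_struct event_work;
};

static void mydev_release(struct kref *ref) { /* free the device here */ }

static void mydev_event_work(struct work_struct *work)
{
	struct mydev *dev = container_of(work, struct mydev, event_work);

	/* ... handle the event outside interrupt context ... */

	clear_bit(MYDEV_FLAG_EVENT, &dev->flags);
	kref_put(&dev->ref, mydev_release);
}

static void mydev_init(struct mydev *dev)
{
	kref_init(&dev->ref);
	INIT_WORK(&dev->event_work, mydev_event_work);
}

/* Called from the interrupt path: defer the heavy lifting. */
static void mydev_notify(struct mydev *dev)
{
	if (test_and_set_bit(MYDEV_FLAG_EVENT, &dev->flags))
		return;			/* already notified */

	kref_get(&dev->ref);
	if (!schedule_work(&dev->event_work))
		kref_put(&dev->ref, mydev_release);
}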
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index f8f91ee652d3..6d9a6d3517cd 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -525,6 +525,7 @@ struct dasd_eckd_private {
int count;
u32 fcx_max_data;
+ char suc_reason;
};
@@ -534,7 +535,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
int dasd_alias_add_device(struct dasd_device *);
int dasd_alias_remove_device(struct dasd_device *);
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
-void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
+void dasd_alias_handle_summary_unit_check(struct work_struct *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
void dasd_alias_lcu_setup_complete(struct dasd_device *);
void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8de29be32a56..0f0add932e7a 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -470,6 +470,7 @@ struct dasd_device {
struct work_struct restore_device;
struct work_struct reload_device;
struct work_struct kick_validate;
+ struct work_struct suc_work;
struct timer_list timer;
debug_info_t *debug_area;
@@ -542,6 +543,7 @@ struct dasd_attention_data {
#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */
#define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */
+#define DASD_FLAG_SUC 14 /* unhandled summary unit check */
#define DASD_SLEEPON_START_TAG ((void *) 1)
#define DASD_SLEEPON_END_TAG ((void *) 2)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 21a67ed047e8..ff6caab8cc8b 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -452,10 +452,11 @@ static int aac_slave_configure(struct scsi_device *sdev)
else if (depth < 2)
depth = 2;
scsi_change_queue_depth(sdev, depth);
- } else
+ } else {
scsi_change_queue_depth(sdev, 1);
sdev->tagged_supported = 1;
+ }
return 0;
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 35968bdb4866..8fb9643fe6e3 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd)
atomic64_set(&afu->room, room);
if (room)
goto write_rrin;
- udelay(nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
pr_err("%s: no cmd_room to send reset\n", __func__);
@@ -303,7 +303,7 @@ write_rrin:
if (rrin != 0x1)
break;
/* Double delay each time */
- udelay(2 << nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
}
@@ -338,7 +338,7 @@ retry:
atomic64_set(&afu->room, room);
if (room)
goto write_ioarrin;
- udelay(nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
dev_err(dev, "%s: no cmd_room to send 0x%X\n",
@@ -352,7 +352,7 @@ retry:
* afu->room.
*/
if (nretry++ < MC_ROOM_RETRY_CNT) {
- udelay(nretry);
+ udelay(1 << nretry);
goto retry;
}
@@ -683,28 +683,23 @@ static void stop_afu(struct cxlflash_cfg *cfg)
}
/**
- * term_mc() - terminates the master context
+ * term_intr() - disables all AFU interrupts
* @cfg: Internal structure associated with the host.
* @level: Depth of allocation, where to begin waterfall tear down.
*
* Safe to call with AFU/MC in partially allocated/initialized state.
*/
-static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
+static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
- int rc = 0;
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
if (!afu || !cfg->mcctx) {
- dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
- __func__);
+ dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
return;
}
switch (level) {
- case UNDO_START:
- rc = cxl_stop_context(cfg->mcctx);
- BUG_ON(rc);
case UNMAP_THREE:
cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
case UNMAP_TWO:
@@ -713,9 +708,34 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
case FREE_IRQ:
cxl_free_afu_irqs(cfg->mcctx);
- case RELEASE_CONTEXT:
- cfg->mcctx = NULL;
+ /* fall through */
+ case UNDO_NOOP:
+ /* No action required */
+ break;
+ }
+}
+
+/**
+ * term_mc() - terminates the master context
+ * @cfg: Internal structure associated with the host.
+ * @level: Depth of allocation, where to begin waterfall tear down.
+ *
+ * Safe to call with AFU/MC in partially allocated/initialized state.
+ */
+static void term_mc(struct cxlflash_cfg *cfg)
+{
+ int rc = 0;
+ struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
+
+ if (!afu || !cfg->mcctx) {
+ dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
+ return;
}
+
+ rc = cxl_stop_context(cfg->mcctx);
+ WARN_ON(rc);
+ cfg->mcctx = NULL;
}
/**
@@ -726,10 +746,20 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
*/
static void term_afu(struct cxlflash_cfg *cfg)
{
+ /*
+ * Tear down is carefully orchestrated to ensure
+ * no interrupts can come in when the problem state
+ * area is unmapped.
+ *
+ * 1) Disable all AFU interrupts
+ * 2) Unmap the problem state area
+ * 3) Stop the master context
+ */
+ term_intr(cfg, UNMAP_THREE);
if (cfg->afu)
stop_afu(cfg);
- term_mc(cfg, UNDO_START);
+ term_mc(cfg);
pr_debug("%s: returning\n", __func__);
}
@@ -1597,41 +1627,24 @@ static int start_afu(struct cxlflash_cfg *cfg)
}
/**
- * init_mc() - create and register as the master context
+ * init_intr() - setup interrupt handlers for the master context
* @cfg: Internal structure associated with the host.
*
* Return: 0 on success, -errno on failure
*/
-static int init_mc(struct cxlflash_cfg *cfg)
+static enum undo_level init_intr(struct cxlflash_cfg *cfg,
+ struct cxl_context *ctx)
{
- struct cxl_context *ctx;
- struct device *dev = &cfg->dev->dev;
struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
int rc = 0;
- enum undo_level level;
-
- ctx = cxl_get_context(cfg->dev);
- if (unlikely(!ctx))
- return -ENOMEM;
- cfg->mcctx = ctx;
-
- /* Set it up as a master with the CXL */
- cxl_set_master(ctx);
-
- /* During initialization reset the AFU to start from a clean slate */
- rc = cxl_afu_reset(cfg->mcctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
- __func__, rc);
- level = RELEASE_CONTEXT;
- goto out;
- }
+ enum undo_level level = UNDO_NOOP;
rc = cxl_allocate_afu_irqs(ctx, 3);
if (unlikely(rc)) {
dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
__func__, rc);
- level = RELEASE_CONTEXT;
+ level = UNDO_NOOP;
goto out;
}
@@ -1661,8 +1674,47 @@ static int init_mc(struct cxlflash_cfg *cfg)
level = UNMAP_TWO;
goto out;
}
+out:
+ return level;
+}
- rc = 0;
+/**
+ * init_mc() - create and register as the master context
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_mc(struct cxlflash_cfg *cfg)
+{
+ struct cxl_context *ctx;
+ struct device *dev = &cfg->dev->dev;
+ int rc = 0;
+ enum undo_level level;
+
+ ctx = cxl_get_context(cfg->dev);
+ if (unlikely(!ctx)) {
+ rc = -ENOMEM;
+ goto ret;
+ }
+ cfg->mcctx = ctx;
+
+ /* Set it up as a master with the CXL */
+ cxl_set_master(ctx);
+
+ /* During initialization reset the AFU to start from a clean slate */
+ rc = cxl_afu_reset(cfg->mcctx);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
+ __func__, rc);
+ goto ret;
+ }
+
+ level = init_intr(cfg, ctx);
+ if (unlikely(level)) {
+ dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
/* This performs the equivalent of the CXL_IOCTL_START_WORK.
* The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
@@ -1678,7 +1730,7 @@ ret:
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
out:
- term_mc(cfg, level);
+ term_intr(cfg, level);
goto ret;
}
@@ -1751,7 +1803,8 @@ out:
err2:
kref_put(&afu->mapcount, afu_unmap);
err1:
- term_mc(cfg, UNDO_START);
+ term_intr(cfg, UNMAP_THREE);
+ term_mc(cfg);
goto out;
}
@@ -2488,8 +2541,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
if (unlikely(rc))
dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
__func__, rc);
- stop_afu(cfg);
- term_mc(cfg, UNDO_START);
+ term_afu(cfg);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
cfg->state = STATE_FAILTERM;
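The cxlflash retry changes above replace linearly growing delays with true exponential backoff: udelay(1 << nretry) doubles the wait on every attempt instead of adding one microsecond per retry. A small standalone illustration of the difference; the retry count is chosen arbitrarily:

#include <stdio.h>

#define RETRY_CNT 5	/* arbitrary, for illustration */

int main(void)
{
	unsigned int nretry, linear = 0, exponential = 0;

	for (nretry = 0; nretry < RETRY_CNT; nretry++) {
		linear += nretry;		/* old: udelay(nretry) */
		exponential += 1u << nretry;	/* new: udelay(1 << nretry) */
	}

	printf("total delay, linear:      %u us\n", linear);
	printf("total delay, exponential: %u us\n", exponential);
	return 0;
}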
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 0faed422c7f4..eb9d8f730b38 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -79,12 +79,11 @@
#define WWPN_BUF_LEN (WWPN_LEN + 1)
enum undo_level {
- RELEASE_CONTEXT = 0,
+ UNDO_NOOP = 0,
FREE_IRQ,
UNMAP_ONE,
UNMAP_TWO,
- UNMAP_THREE,
- UNDO_START
+ UNMAP_THREE
};
struct dev_dependent_vals {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index a404a41e871c..8eaed0522aa3 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1112,9 +1112,9 @@ static void alua_bus_detach(struct scsi_device *sdev)
h->sdev = NULL;
spin_unlock(&h->pg_lock);
if (pg) {
- spin_lock(&pg->lock);
+ spin_lock_irq(&pg->lock);
list_del_rcu(&h->node);
- spin_unlock(&pg->lock);
+ spin_unlock_irq(&pg->lock);
kref_put(&pg->kref, release_port_group);
}
sdev->handler_data = NULL;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e4db5fb3239a..8c44b9c424af 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5030,7 +5030,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
- int r, i;
+ int r, i, index;
unsigned long flags;
u32 reply_address;
u16 smid;
@@ -5039,8 +5039,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
u8 hide_flag;
struct adapter_reply_queue *reply_q;
- long reply_post_free;
- u32 reply_post_free_sz, index = 0;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -5124,27 +5123,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
_base_assign_reply_queues(ioc);
/* initialize Reply Post Free Queue */
- reply_post_free_sz = ioc->reply_post_queue_depth *
- sizeof(Mpi2DefaultReplyDescriptor_t);
- reply_post_free = (long)ioc->reply_post[index].reply_post_free;
+ index = 0;
+ reply_post_free_contig = ioc->reply_post[0].reply_post_free;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ /*
+ * If RDPQ is enabled, switch to the next allocation.
+ * Otherwise advance within the contiguous region.
+ */
+ if (ioc->rdpq_array_enable) {
+ reply_q->reply_post_free =
+ ioc->reply_post[index++].reply_post_free;
+ } else {
+ reply_q->reply_post_free = reply_post_free_contig;
+ reply_post_free_contig += ioc->reply_post_queue_depth;
+ }
+
reply_q->reply_post_host_index = 0;
- reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
- reply_post_free;
for (i = 0; i < ioc->reply_post_queue_depth; i++)
reply_q->reply_post_free[i].Words =
cpu_to_le64(ULLONG_MAX);
if (!_base_is_controller_msix_enabled(ioc))
goto skip_init_reply_post_free_queue;
- /*
- * If RDPQ is enabled, switch to the next allocation.
- * Otherwise advance within the contiguous region.
- */
- if (ioc->rdpq_array_enable)
- reply_post_free = (long)
- ioc->reply_post[++index].reply_post_free;
- else
- reply_post_free += reply_post_free_sz;
}
skip_init_reply_post_free_queue:
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b1bf42b93fcc..1deb6adc411f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev)
int pg83_supported = 0;
unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
- if (sdev->skip_vpd_pages)
+ if (!scsi_device_supports_vpd(sdev))
return;
+
retry_pg0:
vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
if (!vpd_buf)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 92ffd2406f97..2b642b145be1 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -81,6 +81,7 @@ const char *scsi_host_state_name(enum scsi_host_state state)
return name;
}
+#ifdef CONFIG_SCSI_DH
static const struct {
unsigned char value;
char *name;
@@ -94,7 +95,7 @@ static const struct {
{ SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" },
};
-const char *scsi_access_state_name(unsigned char state)
+static const char *scsi_access_state_name(unsigned char state)
{
int i;
char *name = NULL;
@@ -107,6 +108,7 @@ const char *scsi_access_state_name(unsigned char state)
}
return name;
}
+#endif
static int check_set(unsigned long long *val, char *src)
{
@@ -226,7 +228,7 @@ show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
}
/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
-struct device_attribute dev_attr_hstate =
+static struct device_attribute dev_attr_hstate =
__ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
static ssize_t
@@ -401,7 +403,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
NULL
};
-struct attribute_group scsi_shost_attr_group = {
+static struct attribute_group scsi_shost_attr_group = {
.attrs = scsi_sysfs_shost_attrs,
};
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5a5457ac9cdb..f52b74cf8d1e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdp = sdkp->device;
struct Scsi_Host *host = sdp->host;
+ sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
int diskinfo[4];
/* default to most commonly used values */
- diskinfo[0] = 0x40; /* 1 << 6 */
- diskinfo[1] = 0x20; /* 1 << 5 */
- diskinfo[2] = sdkp->capacity >> 11;
-
+ diskinfo[0] = 0x40; /* 1 << 6 */
+ diskinfo[1] = 0x20; /* 1 << 5 */
+ diskinfo[2] = capacity >> 11;
+
/* override with calculated, extended default, or driver values */
if (host->hostt->bios_param)
- host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
+ host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
else
- scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
+ scsicam_bios_param(bdev, capacity, diskinfo);
geo->heads = diskinfo[0];
geo->sectors = diskinfo[1];
@@ -2337,14 +2338,6 @@ got_data:
if (sdkp->capacity > 0xffffffff)
sdp->use_16_for_rw = 1;
- /* Rescale capacity to 512-byte units */
- if (sector_size == 4096)
- sdkp->capacity <<= 3;
- else if (sector_size == 2048)
- sdkp->capacity <<= 2;
- else if (sector_size == 1024)
- sdkp->capacity <<= 1;
-
blk_queue_physical_block_size(sdp->request_queue,
sdkp->physical_block_size);
sdkp->device->sector_size = sector_size;
@@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->ws10 = 1;
}
-static int sd_try_extended_inquiry(struct scsi_device *sdp)
-{
- /* Attempt VPD inquiry if the device blacklist explicitly calls
- * for it.
- */
- if (sdp->try_vpd_pages)
- return 1;
- /*
- * Although VPD inquiries can go to SCSI-2 type devices,
- * some USB ones crash on receiving them, and the pages
- * we currently ask for are for SPC-3 and beyond
- */
- if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
- return 1;
- return 0;
-}
-
-static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
-{
- return blocks << (ilog2(sdev->sector_size) - 9);
-}
-
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
- if (sd_try_extended_inquiry(sdp)) {
+ if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
@@ -2891,7 +2862,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->opt_xfer_blocks &&
sdkp->opt_xfer_blocks <= dev_max &&
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
- sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+ sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
rw_max = q->limits.io_opt =
sdkp->opt_xfer_blocks * sdp->sector_size;
else
@@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
- set_capacity(disk, sdkp->capacity);
+ set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
kfree(buffer);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5f2a84aff29f..654630bb7d0e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -65,7 +65,7 @@ struct scsi_disk {
struct device dev;
struct gendisk *disk;
atomic_t openers;
- sector_t capacity; /* size in 512-byte sectors */
+ sector_t capacity; /* size in logical blocks */
u32 max_xfer_blocks;
u32 opt_xfer_blocks;
u32 max_ws_blocks;
@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
return 0;
}
+static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
+{
+ return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined:
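With sdkp->capacity now kept in logical blocks, every conversion to the block layer's 512-byte sectors goes through logical_to_sectors(), which shifts by ilog2(sector_size) - 9. A quick worked example of the same arithmetic in plain C, assuming a disk with 4096-byte logical blocks:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the helper above, in userspace C. */
static uint64_t logical_to_sectors(unsigned sector_size, uint64_t blocks)
{
	unsigned shift = 0;

	while ((1u << (shift + 1)) <= sector_size)
		shift++;		/* ilog2(sector_size) */

	return blocks << (shift - 9);
}

int main(void)
{
	/* Assumed example: 2^28 blocks of 4 KiB each, i.e. 1 TiB. */
	uint64_t blocks = 268435456ULL;

	printf("%llu logical blocks -> %llu 512-byte sectors\n",
	       (unsigned long long)blocks,
	       (unsigned long long)logical_to_sectors(4096, blocks));
	return 0;
}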
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 71c5138ddf94..dbf1882cfbac 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4941,7 +4941,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
out_unmap:
if (res > 0) {
for (j=0; j < res; j++)
- page_cache_release(pages[j]);
+ put_page(pages[j]);
res = 0;
}
kfree(pages);
@@ -4963,7 +4963,7 @@ static int sgl_unmap_user_pages(struct st_buffer *STbp,
/* FIXME: cache flush missing for rw==READ
* FIXME: call the correct reference counting function
*/
- page_cache_release(page);
+ put_page(page);
}
kfree(STbp->mapped_pages);
STbp->mapped_pages = NULL;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index e7a19be87c38..50769078e72e 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -211,11 +211,15 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- unsigned int bpw = transfer->bits_per_word;
+ unsigned int bpw;
if (!master->dma_rx)
return false;
+ if (!transfer)
+ return false;
+
+ bpw = transfer->bits_per_word;
if (!bpw)
bpw = spi->bits_per_word;
@@ -333,8 +337,9 @@ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
- u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
+ u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
u32 clk = config->speed_hz, delay, reg;
+ u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
/*
* The hardware seems to have a race condition when changing modes. The
@@ -358,13 +363,20 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
if (config->mode & SPI_CPHA)
cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
+ else
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
if (config->mode & SPI_CPOL) {
cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+ } else {
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
}
if (config->mode & SPI_CS_HIGH)
cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+ else
+ cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
if (spi_imx->usedma)
ctrl |= MX51_ECSPI_CTRL_SMC;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0caa3c8bef46..43a02e377b3b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -423,16 +423,12 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
if (mcspi_dma->dma_tx) {
struct dma_async_tx_descriptor *tx;
- struct scatterlist sg;
dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
- sg_init_table(&sg, 1);
- sg_dma_address(&sg) = xfer->tx_dma;
- sg_dma_len(&sg) = xfer->len;
-
- tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (tx) {
tx->callback = omap2_mcspi_tx_callback;
tx->callback_param = spi;
@@ -478,20 +474,15 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
if (mcspi_dma->dma_rx) {
struct dma_async_tx_descriptor *tx;
- struct scatterlist sg;
dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
dma_count -= es;
- sg_init_table(&sg, 1);
- sg_dma_address(&sg) = xfer->rx_dma;
- sg_dma_len(&sg) = dma_count;
-
- tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
- DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
- DMA_CTRL_ACK);
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl,
+ xfer->rx_sg.nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (tx) {
tx->callback = omap2_mcspi_rx_callback;
tx->callback_param = spi;
@@ -505,8 +496,6 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
omap2_mcspi_set_dma_req(spi, 1, 1);
wait_for_completion(&mcspi_dma->dma_rx_completion);
- dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
- DMA_FROM_DEVICE);
if (mcspi->fifo_depth > 0)
return count;
@@ -619,8 +608,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
if (tx != NULL) {
wait_for_completion(&mcspi_dma->dma_tx_completion);
- dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
- DMA_TO_DEVICE);
if (mcspi->fifo_depth > 0) {
irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1087,6 +1074,16 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
gpio_free(spi->cs_gpio);
}
+static bool omap2_mcspi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ if (xfer->len < DMA_MIN_BYTES)
+ return false;
+
+ return true;
+}
+
static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
struct spi_device *spi, struct spi_transfer *t)
{
@@ -1268,32 +1265,6 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
return -EINVAL;
}
- if (len < DMA_MIN_BYTES)
- goto skip_dma_map;
-
- if (mcspi_dma->dma_tx && tx_buf != NULL) {
- t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
- len, DMA_TO_DEVICE);
- if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
- dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
- 'T', len);
- return -EINVAL;
- }
- }
- if (mcspi_dma->dma_rx && rx_buf != NULL) {
- t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
- dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
- 'R', len);
- if (tx_buf != NULL)
- dma_unmap_single(mcspi->dev, t->tx_dma,
- len, DMA_TO_DEVICE);
- return -EINVAL;
- }
- }
-
-skip_dma_map:
return omap2_mcspi_work_one(mcspi, spi, t);
}
@@ -1377,6 +1348,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->transfer_one = omap2_mcspi_transfer_one;
master->set_cs = omap2_mcspi_set_cs;
master->cleanup = omap2_mcspi_cleanup;
+ master->can_dma = omap2_mcspi_can_dma;
master->dev.of_node = node;
master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 8f50a4020f6f..6c6c0013ec7a 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -534,7 +534,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
if (WARN_ON(rs->speed > MAX_SCLK_OUT))
rs->speed = MAX_SCLK_OUT;
- /* the minimum divsor is 2 */
+ /* the minimum divisor is 2 */
if (rs->max_freq < 2 * rs->speed) {
clk_set_rate(rs->spiclk, 2 * rs->speed);
rs->max_freq = clk_get_rate(rs->spiclk);
@@ -730,23 +730,27 @@ static int rockchip_spi_probe(struct platform_device *pdev)
master->transfer_one = rockchip_spi_transfer_one;
master->handle_err = rockchip_spi_handle_err;
- rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
- if (IS_ERR_OR_NULL(rs->dma_tx.ch)) {
+ rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
+ if (IS_ERR(rs->dma_tx.ch)) {
/* Check tx to see if we need defer probing driver */
if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_get_fifo_len;
}
dev_warn(rs->dev, "Failed to request TX DMA channel\n");
+ rs->dma_tx.ch = NULL;
}
- rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
- if (!rs->dma_rx.ch) {
- if (rs->dma_tx.ch) {
+ rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
+ if (IS_ERR(rs->dma_rx.ch)) {
+ if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
dma_release_channel(rs->dma_tx.ch);
rs->dma_tx.ch = NULL;
+ ret = -EPROBE_DEFER;
+ goto err_get_fifo_len;
}
dev_warn(rs->dev, "Failed to request RX DMA channel\n");
+ rs->dma_rx.ch = NULL;
}
if (rs->dma_tx.ch && rs->dma_rx.ch) {
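
The spi-rockchip change swaps dma_request_slave_channel(), which returns NULL on failure, for dma_request_chan(), which returns an ERR_PTR() and so can distinguish -EPROBE_DEFER from a genuinely absent channel. A minimal sketch of that probe pattern (hypothetical helper, assumes <linux/dmaengine.h> and <linux/device.h>):

static int example_request_optional_dma(struct device *dev, const char *name,
					struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, name);
	if (IS_ERR(*chan)) {
		int ret = PTR_ERR(*chan);

		*chan = NULL;
		if (ret == -EPROBE_DEFER)
			return ret;	/* resources not ready: retry probe later */
		dev_warn(dev, "no %s DMA channel, falling back to PIO\n", name);
	}
	return 0;	/* PIO-only operation is still acceptable */
}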
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index de2f2f90d799..0239b45eed92 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1209,7 +1209,7 @@ static void spi_pump_messages(struct kthread_work *work)
struct spi_master *master =
container_of(work, struct spi_master, pump_messages);
- __spi_pump_messages(master, true, false);
+ __spi_pump_messages(master, true, master->bus_lock_flag);
}
static int spi_init_queue(struct spi_master *master)
@@ -2853,7 +2853,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
*/
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
- return __spi_sync(spi, message, 0);
+ return __spi_sync(spi, message, spi->master->bus_lock_flag);
}
EXPORT_SYMBOL_GPL(spi_sync);
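
Passing master->bus_lock_flag into __spi_sync() means a bus held with spi_bus_lock() is now respected even by callers that use plain spi_sync(). A hypothetical client that must issue two messages back to back, with no other device interleaving, would still use the locked variants explicitly:

static int example_atomic_pair(struct spi_device *spi,
			       struct spi_message *first,
			       struct spi_message *second)
{
	int ret;

	spi_bus_lock(spi->master);	/* exclusive use of the bus */
	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);
	spi_bus_unlock(spi->master);

	return ret;
}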
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index cf84581287b9..5bac28a3944e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -30,6 +30,8 @@ source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/comedi/Kconfig"
+source "drivers/staging/olpc_dcon/Kconfig"
+
source "drivers/staging/rtl8192u/Kconfig"
source "drivers/staging/rtl8192e/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 7d6448d20464..a954242b0f2c 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -4,6 +4,7 @@ obj-y += media/
obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_COMEDI) += comedi/
+obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index dab486261154..13335437c69c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -88,7 +88,7 @@ do { \
} while (0)
#ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
#endif
#define LIBCFS_ALLOC_PRE(size, mask) \
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 0f2fd79e5ec8..837eb22749c3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -57,7 +57,7 @@
#include "../libcfs_cpu.h"
#endif
-#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
+#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
#define page_index(p) ((p)->index)
#define memory_pressure_get() (current->flags & PF_MEMALLOC)
@@ -67,7 +67,7 @@
#if BITS_PER_LONG == 32
/* limit to lowmem on 32-bit systems */
#define NUM_CACHEPAGES \
- min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+ min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
#else
#define NUM_CACHEPAGES totalram_pages
#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 08f193c341c5..1c679cb72785 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -514,7 +514,7 @@ typedef struct {
/**
* Starting offset of the fragment within the page. Note that the
* end of the fragment must not pass the end of the page; i.e.,
- * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+ * kiov_len + kiov_offset <= PAGE_SIZE.
*/
unsigned int kiov_offset;
} lnet_kiov_t;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 3e1f24e77f64..d4ce06d0aeeb 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -291,7 +291,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
for (nob = i = 0; i < niov; i++) {
if ((kiov[i].kiov_offset && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
+ (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
return NULL;
pages[i] = kiov[i].kiov_page;
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c90e5102fe06..c3d628bac5b8 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -517,7 +517,7 @@ int libcfs_debug_init(unsigned long bufsize)
max = TCD_MAX_PAGES;
} else {
max = max / num_possible_cpus();
- max <<= (20 - PAGE_CACHE_SHIFT);
+ max <<= (20 - PAGE_SHIFT);
}
rc = cfs_tracefile_init(max);
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index ec3bc04bd89f..244eb89eef68 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -182,7 +182,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
if (tcd->tcd_cur_pages > 0) {
__LASSERT(!list_empty(&tcd->tcd_pages));
tage = cfs_tage_from_list(tcd->tcd_pages.prev);
- if (tage->used + len <= PAGE_CACHE_SIZE)
+ if (tage->used + len <= PAGE_SIZE)
return tage;
}
@@ -260,7 +260,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
* from here: this will lead to infinite recursion.
*/
- if (len > PAGE_CACHE_SIZE) {
+ if (len > PAGE_SIZE) {
pr_err("cowardly refusing to write %lu bytes in a page\n", len);
return NULL;
}
@@ -349,7 +349,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
for (i = 0; i < 2; i++) {
tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
if (!tage) {
- if (needed + known_size > PAGE_CACHE_SIZE)
+ if (needed + known_size > PAGE_SIZE)
mask |= D_ERROR;
cfs_trace_put_tcd(tcd);
@@ -360,7 +360,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
string_buf = (char *)page_address(tage->page) +
tage->used + known_size;
- max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
+ max_nob = PAGE_SIZE - tage->used - known_size;
if (max_nob <= 0) {
printk(KERN_EMERG "negative max_nob: %d\n",
max_nob);
@@ -424,7 +424,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
__LASSERT(debug_buf == string_buf);
tage->used += needed;
- __LASSERT(tage->used <= PAGE_CACHE_SIZE);
+ __LASSERT(tage->used <= PAGE_SIZE);
console:
if ((mask & libcfs_printk) == 0) {
@@ -835,7 +835,7 @@ EXPORT_SYMBOL(cfs_trace_copyout_string);
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
- if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
+ if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */
return -EINVAL;
*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
@@ -951,7 +951,7 @@ int cfs_trace_set_debug_mb(int mb)
}
mb /= num_possible_cpus();
- pages = mb << (20 - PAGE_CACHE_SHIFT);
+ pages = mb << (20 - PAGE_SHIFT);
cfs_tracefile_write_lock();
@@ -977,7 +977,7 @@ int cfs_trace_get_debug_mb(void)
cfs_tracefile_read_unlock();
- return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
+ return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
static int tracefiled(void *arg)
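
The PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT macros were defined as PAGE_SIZE/PAGE_SHIFT, so the lustre hunks in this series are a mechanical rename; the << (20 - PAGE_SHIFT) idiom seen in cfs_trace_set_debug_mb() and cfs_trace_get_debug_mb() converts between megabytes and page counts. A stand-alone worked example (user-space C, 4 KiB pages assumed):

#include <stdio.h>

#define EX_PAGE_SHIFT 12			/* PAGE_SHIFT for 4 KiB pages */

int main(void)
{
	unsigned long mb = 5;
	unsigned long pages = mb << (20 - EX_PAGE_SHIFT);	/* 5 MiB -> 1280 pages */
	unsigned long round_trip = pages >> (20 - EX_PAGE_SHIFT);	/* back to 5 MiB */

	printf("%lu MiB = %lu pages = %lu MiB\n", mb, pages, round_trip);
	return 0;
}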
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index 4c77f9044dd3..ac84e7f4c859 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -87,7 +87,7 @@ void libcfs_unregister_panic_notifier(void);
extern int libcfs_panic_in_progress;
int cfs_trace_max_debug_mb(void);
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
@@ -96,7 +96,7 @@ int cfs_trace_max_debug_mb(void);
/*
* Private declare for tracefile
*/
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
@@ -257,7 +257,7 @@ do { \
do { \
__LASSERT(tage); \
__LASSERT(tage->page); \
- __LASSERT(tage->used <= PAGE_CACHE_SIZE); \
+ __LASSERT(tage->used <= PAGE_SIZE); \
__LASSERT(page_count(tage->page) > 0); \
} while (0)
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index c74514f99f90..75d31217bf92 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -139,7 +139,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
for (i = 0; i < (int)niov; i++) {
/* We take the page pointer on trust */
if (lmd->md_iov.kiov[i].kiov_offset +
- lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
+ lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
return -EINVAL; /* invalid length */
total_length += lmd->md_iov.kiov[i].kiov_len;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 0009a8de77d5..f19aa9320e34 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -549,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
if (len <= frag_len) {
dst->kiov_len = len;
LASSERT(dst->kiov_offset + dst->kiov_len
- <= PAGE_CACHE_SIZE);
+ <= PAGE_SIZE);
return niov;
}
dst->kiov_len = frag_len;
- LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
len -= frag_len;
dst++;
@@ -887,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
rbp = &the_lnet.ln_rtrpools[cpt][0];
LASSERT(msg->msg_len <= LNET_MTU);
- while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
+ while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
rbp++;
LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index cc0c2753dd63..891fd59401d7 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -166,9 +166,9 @@ lnet_ipif_enumerate(char ***namesp)
nalloc = 16; /* first guess at max interfaces */
toobig = 0;
for (;;) {
- if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+ if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
toobig = 1;
- nalloc = PAGE_CACHE_SIZE / sizeof(*ifr);
+ nalloc = PAGE_SIZE / sizeof(*ifr);
CWARN("Too many interfaces: only enumerating first %d\n",
nalloc);
}
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 61459cf9d58f..b01dc424c514 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -27,8 +27,8 @@
#define LNET_NRB_SMALL_PAGES 1
#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
-#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \
- PAGE_CACHE_SHIFT)
+#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \
+ PAGE_SHIFT)
static char *forwarding = "";
module_param(forwarding, charp, 0444);
@@ -1338,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
return NULL;
}
- rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
+ rb->rb_kiov[i].kiov_len = PAGE_SIZE;
rb->rb_kiov[i].kiov_offset = 0;
rb->rb_kiov[i].kiov_page = page;
}
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index eebc92412061..dcb6e506f592 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -90,7 +90,7 @@ brw_client_init(sfw_test_instance_t *tsi)
* NB: this is not going to work for variable page size,
* but we have to keep it for compatibility
*/
- len = npg * PAGE_CACHE_SIZE;
+ len = npg * PAGE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -104,7 +104,7 @@ brw_client_init(sfw_test_instance_t *tsi)
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
if (npg > LNET_MAX_IOV || npg <= 0)
@@ -167,13 +167,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
if (pattern == LST_BRW_CHECK_SIMPLE) {
memcpy(addr, &magic, BRW_MSIZE);
- addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+ addr += PAGE_SIZE - BRW_MSIZE;
memcpy(addr, &magic, BRW_MSIZE);
return;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
+ for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
return;
}
@@ -198,7 +198,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
if (data != magic)
goto bad_data;
- addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+ addr += PAGE_SIZE - BRW_MSIZE;
data = *((__u64 *)addr);
if (data != magic)
goto bad_data;
@@ -207,7 +207,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
+ for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
data = *(((__u64 *)addr) + i);
if (data != magic)
goto bad_data;
@@ -278,7 +278,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
opc = breq->blk_opc;
flags = breq->blk_flags;
npg = breq->blk_npg;
- len = npg * PAGE_CACHE_SIZE;
+ len = npg * PAGE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -292,7 +292,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
@@ -463,10 +463,10 @@ brw_server_handle(struct srpc_server_rpc *rpc)
reply->brw_status = EINVAL;
return 0;
}
- npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
+ npg = reqst->brw_len >> PAGE_SHIFT;
} else {
- npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 5c7cb72eac9a..79ee6c0bf7c1 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
if (args->lstio_tes_param &&
(args->lstio_tes_param_len <= 0 ||
args->lstio_tes_param_len >
- PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+ PAGE_SIZE - sizeof(lstcon_test_t)))
return -EINVAL;
LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
@@ -819,7 +819,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
opc = data->ioc_u32[0];
- if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+ if (data->ioc_plen1 > PAGE_SIZE)
return -EINVAL;
LIBCFS_ALLOC(buf, data->ioc_plen1);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index bcd78888f9cc..35a227d0c657 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -786,8 +786,8 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
brq->blk_opc = param->blk_opc;
- brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
- PAGE_CACHE_SIZE;
+ brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
+ PAGE_SIZE;
brq->blk_flags = param->blk_flags;
return 0;
@@ -822,7 +822,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
if (transop == LST_TRANS_TSBCLIADD) {
npg = sfw_id_pages(test->tes_span);
nob = !(feats & LST_FEAT_BULK_LEN) ?
- npg * PAGE_CACHE_SIZE :
+ npg * PAGE_SIZE :
sizeof(lnet_process_id_packed_t) * test->tes_span;
}
@@ -851,8 +851,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
LASSERT(nob > 0);
len = !(feats & LST_FEAT_BULK_LEN) ?
- PAGE_CACHE_SIZE :
- min_t(int, nob, PAGE_CACHE_SIZE);
+ PAGE_SIZE :
+ min_t(int, nob, PAGE_SIZE);
nob -= len;
bulk->bk_iovs[i].kiov_offset = 0;
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 926c3970c498..e2c532399366 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -1161,7 +1161,7 @@ sfw_add_test(struct srpc_server_rpc *rpc)
int len;
if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
- len = npg * PAGE_CACHE_SIZE;
+ len = npg * PAGE_SIZE;
} else {
len = sizeof(lnet_process_id_packed_t) *
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 69be7d6f48fa..7d7748d96332 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -90,7 +90,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
static int
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
{
- nob = min_t(int, nob, PAGE_CACHE_SIZE);
+ nob = min_t(int, nob, PAGE_SIZE);
LASSERT(nob > 0);
LASSERT(i >= 0 && i < bk->bk_niov);
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 288522d4d7b9..e689ca1846e1 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -390,10 +390,10 @@ typedef struct sfw_test_instance {
} tsi_u;
} sfw_test_instance_t;
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used */
#define SFW_MAX_CONCUR LST_MAX_CONCUR
-#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
+#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index 33e0b99e1fb4..c6c7f54637fb 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
return;
if (PagePrivate(page))
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
cancel_dirty_page(page);
ClearPageMappedToDisk(page);
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index b5088b13a305..242bb1ef6245 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1118,7 +1118,7 @@ struct lu_context_key {
{ \
type *value; \
\
- CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
+ CLASSERT(PAGE_SIZE >= sizeof (*value)); \
\
value = kzalloc(sizeof(*value), GFP_NOFS); \
if (!value) \
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index da8bc6eadd13..5aae1d06a5fa 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -1022,16 +1022,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
* MDS_READPAGE page size
*
* This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than PAGE_CACHE_SIZE because the client needs to
+ * It's different than PAGE_SIZE because the client needs to
* access the struct lu_dirpage header packed at the beginning of
 * the "page" and without this there isn't any way to know where the
- * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_SIZE differ.
*/
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
-#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
+#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
/** @} lu_dir */
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index df94f9f3bef2..af77eb359c43 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -155,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
if (cli->cl_max_mds_easize < body->max_mdsize) {
cli->cl_max_mds_easize = body->max_mdsize;
cli->cl_default_mds_easize =
- min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE);
+ min_t(__u32, body->max_mdsize, PAGE_SIZE);
}
if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
cli->cl_max_mds_cookiesize = body->max_cookiesize;
cli->cl_default_mds_cookiesize =
- min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE);
+ min_t(__u32, body->max_cookiesize, PAGE_SIZE);
}
}
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 4fa1a18b7d15..69586a522eb7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -99,21 +99,21 @@
*/
#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 4a0f2e8b19f6..4264d97650ec 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -272,7 +272,7 @@ struct client_obd {
int cl_grant_shrink_interval; /* seconds */
/* A chunk is an optimal size used by osc_extent to determine
- * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size)
+ * the extent size. A chunk is max(PAGE_SIZE, OST block size)
*/
int cl_chunkbits;
int cl_chunk;
@@ -1318,7 +1318,7 @@ bad_format:
static inline int cli_brw_size(struct obd_device *obd)
{
- return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
}
#endif /* __OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 225262fa67b6..f8ee3a3254ba 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -500,7 +500,7 @@ extern char obd_jobid_var[];
#ifdef POISON_BULK
#define POISON_PAGE(page, val) do { \
- memset(kmap(page), val, PAGE_CACHE_SIZE); \
+ memset(kmap(page), val, PAGE_SIZE); \
kunmap(page); \
} while (0)
#else
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index aced41ab93a1..96141d17d07f 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -758,9 +758,9 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
* --bug 17336
*/
loff_t size = cl_isize_read(inode);
- loff_t cur_index = start >> PAGE_CACHE_SHIFT;
+ loff_t cur_index = start >> PAGE_SHIFT;
loff_t size_index = (size - 1) >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
if ((size == 0 && cur_index != 0) ||
size_index < cur_index)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index b586d5a88d00..7dd7df59aa1f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -307,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_avail_grant = 0;
/* FIXME: Should limit this for the sum of all cl_dirty_max. */
cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
- if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
- cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
+ if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
+ cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
INIT_LIST_HEAD(&cli->cl_cache_waiters);
INIT_LIST_HEAD(&cli->cl_loi_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -353,15 +353,15 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
* In the future this should likely be increased. LU-1431
*/
cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
- LNET_MTU >> PAGE_CACHE_SHIFT);
+ LNET_MTU >> PAGE_SHIFT);
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
cli->cl_max_rpcs_in_flight = 2;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
cli->cl_max_rpcs_in_flight = 3;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
cli->cl_max_rpcs_in_flight = 4;
} else {
cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 3e937b050203..b913ba9cf97c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -107,7 +107,7 @@
/*
* 50 ldlm locks for 1MB of RAM.
*/
-#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
+#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
/*
* Maximal possible grant step plan in %.
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index c7904a96f9af..74e193e52cd6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -546,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off)
{
int avail;
- avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
+ avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
if (likely(avail >= 0))
avail /= (int)sizeof(struct lustre_handle);
else
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 4e0a3e583330..e4c82883e580 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -134,9 +134,8 @@
* a header lu_dirpage which describes the start/end hash, and whether this
* page is empty (contains no dir entry) or hash collide with next page.
* After client receives reply, several pages will be integrated into dir page
- * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
- * lu_dirpage for this integrated page will be adjusted. See
- * lmv_adjust_dirpages().
+ * in PAGE_SIZE (if PAGE_SIZE is greater than LU_PAGE_SIZE), and the lu_dirpage
+ * for this integrated page will be adjusted. See lmv_adjust_dirpages().
*
*/
@@ -153,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
struct page **page_pool;
struct page *page;
struct lu_dirpage *dp;
- int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
+ int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
int nrdpgs = 0; /* number of pages read actually */
int npages;
int i;
@@ -193,8 +192,8 @@ static int ll_dir_filler(void *_hash, struct page *page0)
if (body->valid & OBD_MD_FLSIZE)
cl_isize_write(inode, body->size);
- nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
- >> PAGE_CACHE_SHIFT;
+ nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
+ >> PAGE_SHIFT;
SetPageUptodate(page0);
}
unlock_page(page0);
@@ -209,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
page = page_pool[i];
if (rc < 0 || i >= nrdpgs) {
- page_cache_release(page);
+ put_page(page);
continue;
}
@@ -230,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
offset, ret);
}
- page_cache_release(page);
+ put_page(page);
}
if (page_pool != &page0)
@@ -247,7 +246,7 @@ void ll_release_page(struct page *page, int remove)
truncate_complete_page(page->mapping, page);
unlock_page(page);
}
- page_cache_release(page);
+ put_page(page);
}
/*
@@ -273,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
if (found > 0 && !radix_tree_exceptional_entry(page)) {
struct lu_dirpage *dp;
- page_cache_get(page);
+ get_page(page);
spin_unlock_irq(&mapping->tree_lock);
/*
* In contrast to find_lock_page() we are sure that directory
@@ -313,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
page = NULL;
}
} else {
- page_cache_release(page);
+ put_page(page);
page = ERR_PTR(-EIO);
}
@@ -1507,7 +1506,7 @@ skip_lmm:
st.st_gid = body->gid;
st.st_rdev = body->rdev;
st.st_size = body->size;
- st.st_blksize = PAGE_CACHE_SIZE;
+ st.st_blksize = PAGE_SIZE;
st.st_blocks = body->blocks;
st.st_atime = body->atime;
st.st_mtime = body->mtime;
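
page_cache_get() and page_cache_release() were aliases for get_page() and put_page(), so the llite conversions above keep every reference count balanced. The pairing, in a hypothetical helper (assumes <linux/pagemap.h>):

static void example_peek_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);	/* takes a reference */

	if (!page)
		return;

	/* ... inspect page contents ... */

	put_page(page);		/* drop it; this used to be page_cache_release() */
}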
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 3e1572cb457b..e3c0f1dd4d31 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -310,10 +310,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode)
/* default to about 40meg of readahead on a given system. That much tied
* up in 512k readahead requests serviced at 40ms each is about 1GB/s.
*/
-#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT))
/* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
enum ra_stat {
RA_STAT_HIT = 0,
@@ -975,13 +975,13 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
static inline void ll_invalidate_page(struct page *vmpage)
{
struct address_space *mapping = vmpage->mapping;
- loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
+ loff_t offset = vmpage->index << PAGE_SHIFT;
LASSERT(PageLocked(vmpage));
if (!mapping)
return;
- ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
+ ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
truncate_complete_page(mapping, vmpage);
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6d6bb33e3655..b57a992688a8 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
si_meminfo(&si);
pages = si.totalram - si.totalhigh;
- if (pages >> (20 - PAGE_CACHE_SHIFT) < 512)
+ if (pages >> (20 - PAGE_SHIFT) < 512)
lru_page_max = pages / 2;
else
lru_page_max = (pages / 4) * 3;
@@ -272,12 +272,12 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
valid != CLIENT_CONNECT_MDT_REQD) {
char *buf;
- buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto out_md_fid;
}
- obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
+ obd_connect_flags2str(buf, PAGE_SIZE,
valid ^ CLIENT_CONNECT_MDT_REQD, ",");
LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
sbi->ll_md_exp->exp_obd->obd_name, buf);
@@ -335,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
sbi->ll_md_brw_size = data->ocd_brw_size;
else
- sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
+ sbi->ll_md_brw_size = PAGE_SIZE;
if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
LCONSOLE_INFO("Layout lock feature supported.\n");
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 69445a9f2011..5b484e62ffd0 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -58,7 +58,7 @@ void policy_from_vma(ldlm_policy_data_t *policy,
size_t count)
{
policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
- (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+ (vma->vm_pgoff << PAGE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
~CFS_PAGE_MASK;
}
@@ -321,7 +321,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
vmpage = vio->u.fault.ft_vmpage;
if (result != 0 && vmpage) {
- page_cache_release(vmpage);
+ put_page(vmpage);
vmf->page = NULL;
}
}
@@ -360,7 +360,7 @@ restart:
lock_page(vmpage);
if (unlikely(!vmpage->mapping)) { /* unlucky */
unlock_page(vmpage);
- page_cache_release(vmpage);
+ put_page(vmpage);
vmf->page = NULL;
if (!printed && ++count > 16) {
@@ -457,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
LASSERTF(last > first, "last %llu first %llu\n", last, first);
if (mapping_mapped(mapping)) {
rc = 0;
- unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
+ unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
last - first + 1, 0);
}
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index b725fc16cf49..f169c0db63b4 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -218,7 +218,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
bio_for_each_segment(bvec, bio, iter) {
BUG_ON(bvec.bv_offset != 0);
- BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
+ BUG_ON(bvec.bv_len != PAGE_SIZE);
pages[page_count] = bvec.bv_page;
offsets[page_count] = offset;
@@ -232,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
(rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
page_count);
- pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
+ pvec->ldp_size = page_count << PAGE_SHIFT;
pvec->ldp_nr = page_count;
/* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
@@ -507,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->lo_blocksize = PAGE_CACHE_SIZE;
+ lo->lo_blocksize = PAGE_SIZE;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
@@ -525,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
lo->lo_queue->queuedata = lo;
/* queue parameters */
- CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+ CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
blk_queue_logical_block_size(lo->lo_queue,
- (unsigned short)PAGE_CACHE_SIZE);
+ (unsigned short)PAGE_SIZE);
blk_queue_max_hw_sectors(lo->lo_queue,
- LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+ LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
set_capacity(disks[lo->lo_number], size);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 45941a6600fe..27ab1261400e 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
pages_number = sbi->ll_ra_info.ra_max_pages;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
}
@@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
if (rc)
return rc;
- pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+ pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number > totalram_pages / 2) {
CERROR("can't set file readahead more than %lu MB\n",
- totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
+ totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
return -ERANGE;
}
@@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
}
@@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
}
@@ -349,7 +349,7 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
*/
if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
- sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
+ sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
return -ERANGE;
}
@@ -366,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
struct super_block *sb = m->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = &sbi->ll_cache;
- int shift = 20 - PAGE_CACHE_SHIFT;
+ int shift = 20 - PAGE_SHIFT;
int max_cached_mb;
int unused_mb;
@@ -405,7 +405,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
return -EFAULT;
kernbuf[count] = 0;
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
kernbuf;
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -415,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
if (pages_number < 0 || pages_number > totalram_pages) {
CERROR("%s: can't set max cache more than %lu MB\n",
ll_get_fsname(sb, NULL, 0),
- totalram_pages >> (20 - PAGE_CACHE_SHIFT));
+ totalram_pages >> (20 - PAGE_SHIFT));
return -ERANGE;
}
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 34614acf3f8e..edab6c5b7e50 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -146,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
*/
io->ci_lockreq = CILR_NEVER;
- pos = vmpage->index << PAGE_CACHE_SHIFT;
+ pos = vmpage->index << PAGE_SHIFT;
/* Create a temp IO to serve write. */
- result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
+ result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
if (result == 0) {
cio->cui_fd = LUSTRE_FPRIVATE(file);
cio->cui_iter = NULL;
@@ -498,7 +498,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
}
if (rc != 1)
unlock_page(vmpage);
- page_cache_release(vmpage);
+ put_page(vmpage);
} else {
which = RA_STAT_FAILED_GRAB_PAGE;
msg = "g_c_p_n failed";
@@ -521,13 +521,13 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
* striped over, rather than having a constant value for all files here.
*/
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
* Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
* by default, this should be adjusted corresponding with max_read_ahead_mb
* and max_read_ahead_per_file_mb otherwise the readahead budget can be used
* up quickly which will affect read performance significantly. See LU-2816
*/
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
static inline int stride_io_mode(struct ll_readahead_state *ras)
{
@@ -739,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
end = rpc_boundary;
/* Truncate RA window to end of file */
- end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
+ end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
ras->ras_next_readahead = max(end, end + 1);
RAS_CDEBUG(ras);
@@ -776,7 +776,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
if (reserved != 0)
ll_ra_count_put(ll_i2sbi(inode), reserved);
- if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+ if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
ll_ra_stats_inc(mapping, RA_STAT_EOF);
/* if we didn't get to the end of the region we reserved from
@@ -985,8 +985,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
if (ras->ras_requests == 2 && !ras->ras_request_index) {
__u64 kms_pages;
- kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
@@ -1173,7 +1173,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
* PageWriteback or clean the page.
*/
result = cl_sync_file_range(inode, offset,
- offset + PAGE_CACHE_SIZE - 1,
+ offset + PAGE_SIZE - 1,
CL_FSYNC_LOCAL, 1);
if (result > 0) {
/* actually we may have written more than one page.
@@ -1211,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
int ignore_layout = 0;
if (wbc->range_cyclic) {
- start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+ start = mapping->writeback_index << PAGE_SHIFT;
end = OBD_OBJECT_EOF;
} else {
start = wbc->range_start;
@@ -1241,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
if (end == OBD_OBJECT_EOF)
end = i_size_read(inode);
- mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
+ mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
}
return result;
}
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 7a5db67bc680..69aa15e8e3ef 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -87,7 +87,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
* below because they are run with page locked and all our io is
* happening with locked page too
*/
- if (offset == 0 && length == PAGE_CACHE_SIZE) {
+ if (offset == 0 && length == PAGE_SIZE) {
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
inode = vmpage->mapping->host;
@@ -193,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
return -EFBIG;
}
- *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- *max_pages -= user_addr >> PAGE_CACHE_SHIFT;
+ *max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ *max_pages -= user_addr >> PAGE_SHIFT;
*pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
if (*pages) {
@@ -217,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
for (i = 0; i < npages; i++) {
if (do_dirty)
set_page_dirty_lock(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
kvfree(pages);
}
@@ -357,7 +357,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
* up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
*/
#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
- PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
+ PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
loff_t file_offset)
{
@@ -382,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
CDEBUG(D_VFSTRACE,
"VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
- file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
- MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
+ file_offset, file_offset, count >> PAGE_SHIFT,
+ MAX_DIO_SIZE >> PAGE_SHIFT);
/* Check that all user buffers are aligned as well */
if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
@@ -432,8 +432,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
* page worth of page pointers = 4MB on i386.
*/
if (result == -ENOMEM &&
- size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
- PAGE_CACHE_SIZE) {
+ size > (PAGE_SIZE / sizeof(*pages)) *
+ PAGE_SIZE) {
size = ((((size / 2) - 1) |
~CFS_PAGE_MASK) + 1) &
CFS_PAGE_MASK;
@@ -474,10 +474,10 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
int rc;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
@@ -488,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
rc = ll_prepare_write(file, page, from, from + len);
if (rc) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
return rc;
}
@@ -497,12 +497,12 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
int rc;
rc = ll_commit_write(file, page, from, from + copied);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return rc ?: copied;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index fb0c26ee7ff3..85a835976174 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -512,9 +512,9 @@ static int vvp_io_read_start(const struct lu_env *env,
vio->cui_ra_window_set = 1;
bead->lrr_start = cl_index(obj, pos);
/*
- * XXX: explicit PAGE_CACHE_SIZE
+ * XXX: explicit PAGE_SIZE
*/
- bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
+ bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
ll_ra_read_in(file, bead);
}
@@ -959,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env,
* We're completely overwriting an existing page, so _don't_
* set it up to date until commit_write
*/
- if (from == 0 && to == PAGE_CACHE_SIZE) {
+ if (from == 0 && to == PAGE_SIZE) {
CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
POISON_PAGE(page, 0x11);
} else
@@ -1022,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
set_page_dirty(vmpage);
vvp_write_pending(cl2ccc(obj), cp);
} else if (result == -EDQUOT) {
- pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+ pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
bool need_clip = true;
/*
@@ -1040,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
* being.
*/
if (last_index > pg->cp_index) {
- to = PAGE_CACHE_SIZE;
+ to = PAGE_SIZE;
need_clip = false;
} else if (last_index == pg->cp_index) {
int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 850bae734075..33ca3eb34965 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -57,7 +57,7 @@ static void vvp_page_fini_common(struct ccc_page *cp)
struct page *vmpage = cp->cpg_page;
LASSERT(vmpage);
- page_cache_release(vmpage);
+ put_page(vmpage);
}
static void vvp_page_fini(const struct lu_env *env,
@@ -164,12 +164,12 @@ static int vvp_page_unmap(const struct lu_env *env,
LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
- offset = vmpage->index << PAGE_CACHE_SHIFT;
+ offset = vmpage->index << PAGE_SHIFT;
/*
* XXX is it safe to call this with the page lock held?
*/
- ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
+ ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
return 0;
}
@@ -537,7 +537,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
cpg->cpg_page = vmpage;
- page_cache_get(vmpage);
+ get_page(vmpage);
INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
if (page->cp_type == CPT_CACHEABLE) {
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 0f776cf8a5aa..9abb7c2b9231 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -2017,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
* |s|e|f|p|ent| 0 | ... | 0 |
* '----------------- -----'
*
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
* larger than LU_PAGE_SIZE, a single host page may contain multiple
* lu_dirpages. After reading the lu_dirpages from the MDS, the
* ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
* - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
* to the first entry of the next lu_dirpage.
*/
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
{
int i;
@@ -2101,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
}
#else
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_SIZE > LU_PAGE_SIZE */
static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct page **pages, struct ptlrpc_request **request)
@@ -2110,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_obd *lmv = &obd->u.lmv;
__u64 offset = op_data->op_offset;
int rc;
- int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
+ int ncfspgs; /* pages read in PAGE_SIZE */
int nlupgs; /* pages read in LU_PAGE_SIZE */
struct lmv_tgt_desc *tgt;
@@ -2129,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
if (rc != 0)
return rc;
- ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
- >> PAGE_CACHE_SHIFT;
+ ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
+ >> PAGE_SHIFT;
nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
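
LU_PAGE_SHIFT stays fixed at 12, so lmv_adjust_dirpages() only has work to do on hosts whose VM page is larger than 4 KiB; LU_PAGE_COUNT gives how many lu_dirpages fit in one VM page. A small worked example (user-space C, hypothetical host PAGE_SHIFT values):

#include <stdio.h>

#define LU_PAGE_SHIFT 12	/* on-wire directory page: 4 KiB */

int main(void)
{
	int host_page_shift[] = { 12, 16 };	/* 4 KiB and 64 KiB hosts */
	int i;

	for (i = 0; i < 2; i++) {
		int lu_page_count = 1 << (host_page_shift[i] - LU_PAGE_SHIFT);

		printf("PAGE_SHIFT=%d -> %d lu_dirpage(s) per VM page\n",
		       host_page_shift[i], lu_page_count);
	}
	return 0;
}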
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 55dd8ef9525b..b91d3ff18b02 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1002,10 +1002,10 @@ restart_bulk:
/* NB req now owns desc and will free it when it gets freed */
for (i = 0; i < op_data->op_npages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
mdc_readdir_pack(req, op_data->op_offset,
- PAGE_CACHE_SIZE * op_data->op_npages,
+ PAGE_SIZE * op_data->op_npages,
&op_data->op_fid1);
ptlrpc_request_set_replen(req);
@@ -1037,7 +1037,7 @@ restart_bulk:
if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
req->rq_bulk->bd_nob_transferred,
- PAGE_CACHE_SIZE * op_data->op_npages);
+ PAGE_SIZE * op_data->op_npages);
ptlrpc_req_finished(req);
return -EPROTO;
}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index b7dc87248032..3924b095bfb0 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -1113,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd,
}
enum {
- CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+ CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
CONFIG_READ_NRPAGES = 4
};
@@ -1137,19 +1137,19 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
LASSERT(cfg->cfg_instance);
LASSERT(cfg->cfg_sb == cfg->cfg_instance);
- inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!inst)
return -ENOMEM;
- pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
- if (pos >= PAGE_CACHE_SIZE) {
+ pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
+ if (pos >= PAGE_SIZE) {
kfree(inst);
return -E2BIG;
}
++pos;
buf = inst + pos;
- bufsz = PAGE_CACHE_SIZE - pos;
+ bufsz = PAGE_SIZE - pos;
while (datalen > 0) {
int entry_len = sizeof(*entry);
@@ -1181,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
/* Keep this swab for normal mixed endian handling. LU-1644 */
if (mne_swab)
lustre_swab_mgs_nidtbl_entry(entry);
- if (entry->mne_length > PAGE_CACHE_SIZE) {
+ if (entry->mne_length > PAGE_SIZE) {
CERROR("MNE too large (%u)\n", entry->mne_length);
break;
}
@@ -1371,7 +1371,7 @@ again:
}
body->mcb_offset = cfg->cfg_last_idx + 1;
body->mcb_type = cld->cld_type;
- body->mcb_bits = PAGE_CACHE_SHIFT;
+ body->mcb_bits = PAGE_SHIFT;
body->mcb_units = nrpages;
/* allocate bulk transfer descriptor */
@@ -1383,7 +1383,7 @@ again:
}
for (i = 0; i < nrpages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
@@ -1411,7 +1411,7 @@ again:
goto out;
}
- if (ealen > nrpages << PAGE_CACHE_SHIFT) {
+ if (ealen > nrpages << PAGE_SHIFT) {
rc = -EINVAL;
goto out;
}
@@ -1439,7 +1439,7 @@ again:
ptr = kmap(pages[i]);
rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
- min_t(int, ealen, PAGE_CACHE_SIZE),
+ min_t(int, ealen, PAGE_SIZE),
mne_swab);
kunmap(pages[i]);
if (rc2 < 0) {
@@ -1448,7 +1448,7 @@ again:
break;
}
- ealen -= PAGE_CACHE_SIZE;
+ ealen -= PAGE_SIZE;
}
out:
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 231a2f26c693..394580016638 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -1477,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
/*
* XXX for now.
*/
- return (loff_t)idx << PAGE_CACHE_SHIFT;
+ return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);
@@ -1489,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
/*
* XXX for now.
*/
- return offset >> PAGE_CACHE_SHIFT;
+ return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);
int cl_page_size(const struct cl_object *obj)
{
- return 1 << PAGE_CACHE_SHIFT;
+ return 1 << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 1a938e1376f9..c2cf015962dd 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -461,9 +461,9 @@ static int obd_init_checks(void)
CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
ret = -EINVAL;
}
- if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
+ if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
CWARN("mask failed: u64val %llu >= %llu\n", u64val,
- (__u64)PAGE_CACHE_SIZE);
+ (__u64)PAGE_SIZE);
ret = -EINVAL;
}
@@ -509,7 +509,7 @@ static int __init obdclass_init(void)
* For clients with less memory, a larger fraction is needed
* for other purposes (mostly for BGL).
*/
- if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+ if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
obd_max_dirty_pages = totalram_pages / 4;
else
obd_max_dirty_pages = totalram_pages / 2;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index 9496c09b2b69..b41b65e2f021 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -47,7 +47,6 @@
#include "../../include/lustre/lustre_idl.h"
#include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
{
@@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
dst->i_blkbits = ffs(src->o_blksize) - 1;
- if (dst->i_blkbits < PAGE_CACHE_SHIFT)
- dst->i_blkbits = PAGE_CACHE_SHIFT;
+ if (dst->i_blkbits < PAGE_SHIFT)
+ dst->i_blkbits = PAGE_SHIFT;
/* allocation of space */
if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index fd333b9e968c..e6bf414a4444 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
return sprintf(buf, "%ul\n",
- obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
+ obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
}
static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
@@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
if (rc)
return rc;
- val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
+ val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
if (val > ((totalram_pages / 10) * 9)) {
/* Somebody wants to assign too much memory to dirty pages */
return -EINVAL;
}
- if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
+ if (val < 4 << (20 - PAGE_SHIFT)) {
/* Less than 4 Mb for dirty cache is also bad */
return -EINVAL;
}
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 65a4746c89ca..978568ada8e9 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -840,8 +840,8 @@ static int lu_htable_order(void)
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
- if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
- cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
+ if (cache_size > 1 << (30 - PAGE_SHIFT))
+ cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
#endif
/* clear off unreasonable cache setting. */
@@ -853,7 +853,7 @@ static int lu_htable_order(void)
lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
}
cache_size = cache_size / 100 * lu_cache_percent *
- (PAGE_CACHE_SIZE / 1024);
+ (PAGE_SIZE / 1024);
for (bits = 1; (1 << bits) < cache_size; ++bits) {
;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 64ffe243f870..1e83669c204d 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -278,7 +278,7 @@ static void echo_page_fini(const struct lu_env *env,
struct page *vmpage = ep->ep_vmpage;
atomic_dec(&eco->eo_npages);
- page_cache_release(vmpage);
+ put_page(vmpage);
}
static int echo_page_prep(const struct lu_env *env,
@@ -373,7 +373,7 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
struct echo_object *eco = cl2echo_obj(obj);
ep->ep_vmpage = vmpage;
- page_cache_get(vmpage);
+ get_page(vmpage);
mutex_init(&ep->ep_lock);
cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
atomic_inc(&eco->eo_npages);
@@ -1138,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
LASSERT(rc == 0);
rc = cl_echo_enqueue0(env, eco, offset,
- offset + npages * PAGE_CACHE_SIZE - 1,
+ offset + npages * PAGE_SIZE - 1,
rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
CEF_NEVER);
if (rc < 0)
@@ -1311,11 +1311,11 @@ echo_client_page_debug_setup(struct page *page, int rw, u64 id,
int delta;
/* no partial pages on the client */
- LASSERT(count == PAGE_CACHE_SIZE);
+ LASSERT(count == PAGE_SIZE);
addr = kmap(page);
- for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
if (rw == OBD_BRW_WRITE) {
stripe_off = offset + delta;
stripe_id = id;
@@ -1341,11 +1341,11 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
int rc2;
/* no partial pages on the client */
- LASSERT(count == PAGE_CACHE_SIZE);
+ LASSERT(count == PAGE_SIZE);
addr = kmap(page);
- for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
stripe_off = offset + delta;
stripe_id = id;
@@ -1391,7 +1391,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
return -EINVAL;
/* XXX think again with misaligned I/O */
- npages = count >> PAGE_CACHE_SHIFT;
+ npages = count >> PAGE_SHIFT;
if (rw == OBD_BRW_WRITE)
brw_flags = OBD_BRW_ASYNC;
@@ -1408,7 +1408,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
for (i = 0, pgp = pga, off = offset;
i < npages;
- i++, pgp++, off += PAGE_CACHE_SIZE) {
+ i++, pgp++, off += PAGE_SIZE) {
LASSERT(!pgp->pg); /* for cleanup */
@@ -1418,7 +1418,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
goto out;
pages[i] = pgp->pg;
- pgp->count = PAGE_CACHE_SIZE;
+ pgp->count = PAGE_SIZE;
pgp->off = off;
pgp->flag = brw_flags;
@@ -1473,8 +1473,8 @@ static int echo_client_prep_commit(const struct lu_env *env,
if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
return -EINVAL;
- npages = batch >> PAGE_CACHE_SHIFT;
- tot_pages = count >> PAGE_CACHE_SHIFT;
+ npages = batch >> PAGE_SHIFT;
+ tot_pages = count >> PAGE_SHIFT;
lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
@@ -1497,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env,
if (tot_pages < npages)
npages = tot_pages;
- for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
+ for (i = 0; i < npages; i++, off += PAGE_SIZE) {
rnb[i].offset = off;
- rnb[i].len = PAGE_CACHE_SIZE;
+ rnb[i].len = PAGE_SIZE;
rnb[i].flags = brw_flags;
}
@@ -1878,7 +1878,7 @@ static int __init obdecho_init(void)
{
LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
- LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+ LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
return echo_client_init();
}
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 57c43c506ef2..a3358c39b2f1 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
if (rc)
return rc;
- pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+ pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number <= 0 ||
- pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
+ pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT);
+ cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
osc_wake_cache_waiters(cli);
client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = m->private;
struct client_obd *cli = &dev->u.cli;
- int shift = 20 - PAGE_CACHE_SHIFT;
+ int shift = 20 - PAGE_SHIFT;
seq_printf(m,
"used_mb: %d\n"
@@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
return -EFAULT;
kernbuf[count] = 0;
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
kernbuf;
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -569,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
/* if the max_pages is specified in bytes, convert to pages */
if (val >= ONE_MB_BRW_SIZE)
- val >>= PAGE_CACHE_SHIFT;
+ val >>= PAGE_SHIFT;
- chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
+ chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
/* max_pages_per_rpc must be chunk aligned */
val = (val + ~chunk_mask) & chunk_mask;
- if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
+ if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
return -ERANGE;
}
client_obd_list_lock(&cli->cl_loi_list_lock);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 63363111380c..5f25bf83dcfc 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -544,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
return -ERANGE;
LASSERT(cur->oe_osclock == victim->oe_osclock);
- ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
+ ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
chunk_end = cur->oe_end >> ppc_bits;
if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
@@ -647,8 +647,8 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
- LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
- ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
+ ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
chunk_mask = ~((1 << ppc_bits) - 1);
chunksize = 1 << cli->cl_chunkbits;
chunk = index >> ppc_bits;
@@ -871,8 +871,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
if (!sent) {
lost_grant = ext->oe_grants;
- } else if (blocksize < PAGE_CACHE_SIZE &&
- last_count != PAGE_CACHE_SIZE) {
+ } else if (blocksize < PAGE_SIZE &&
+ last_count != PAGE_SIZE) {
/* For short writes we shouldn't count parts of pages that
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check.
@@ -884,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
if (end)
count += blocksize - end;
- lost_grant = PAGE_CACHE_SIZE - count;
+ lost_grant = PAGE_SIZE - count;
}
if (ext->oe_grants > 0)
osc_free_grant(cli, nr_pages, lost_grant);
@@ -967,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
struct osc_async_page *oap;
struct osc_async_page *tmp;
int pages_in_chunk = 0;
- int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
__u64 trunc_chunk = trunc_index >> ppc_bits;
int grants = 0;
int nr_pages = 0;
@@ -1125,7 +1125,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
LASSERT(last->oap_count > 0);
- LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
+ LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
}
@@ -1134,7 +1134,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
*/
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
- oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
+ oap->oap_count = PAGE_SIZE - oap->oap_page_off;
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
}
}
@@ -1158,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
struct osc_object *obj = ext->oe_obj;
struct client_obd *cli = osc_cli(obj);
struct osc_extent *next;
- int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
pgoff_t chunk = index >> ppc_bits;
pgoff_t end_chunk;
pgoff_t end_index;
@@ -1293,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env,
return 0;
else if (cl_offset(obj, page->cp_index + 1) > kms)
/* catch sub-page write at end of file */
- return kms % PAGE_CACHE_SIZE;
+ return kms % PAGE_SIZE;
else
- return PAGE_CACHE_SIZE;
+ return PAGE_SIZE;
}
static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
@@ -1376,10 +1376,10 @@ static void osc_consume_write_grant(struct client_obd *cli,
assert_spin_locked(&cli->cl_loi_list_lock.lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_inc(&obd_dirty_pages);
- cli->cl_dirty += PAGE_CACHE_SIZE;
+ cli->cl_dirty += PAGE_SIZE;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
- PAGE_CACHE_SIZE, pga, pga->pg);
+ PAGE_SIZE, pga, pga->pg);
osc_update_next_shrink(cli);
}
@@ -1396,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli,
pga->flag &= ~OBD_BRW_FROM_GRANT;
atomic_dec(&obd_dirty_pages);
- cli->cl_dirty -= PAGE_CACHE_SIZE;
+ cli->cl_dirty -= PAGE_SIZE;
if (pga->flag & OBD_BRW_NOCACHE) {
pga->flag &= ~OBD_BRW_NOCACHE;
atomic_dec(&obd_dirty_transit_pages);
- cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
+ cli->cl_dirty_transit -= PAGE_SIZE;
}
}
@@ -1456,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
* used, we should return these grants to OST. There're two cases where grants
* can be lost:
* 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
* written. In this case OST may use less chunks to serve this partial
* write. OSTs don't actually know the page size on the client side. so
* clients have to calculate lost grant by the blocksize on the OST.
@@ -1469,7 +1469,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
client_obd_list_lock(&cli->cl_loi_list_lock);
atomic_sub(nr_pages, &obd_dirty_pages);
- cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
+ cli->cl_dirty -= nr_pages << PAGE_SHIFT;
cli->cl_lost_grant += lost_grant;
if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
/* borrow some grant from truncate to avoid the case that
@@ -1512,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli,
if (rc < 0)
return 0;
- if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
+ if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
- cli->cl_dirty_transit += PAGE_CACHE_SIZE;
+ cli->cl_dirty_transit += PAGE_SIZE;
atomic_inc(&obd_dirty_transit_pages);
oap->oap_brw_flags |= OBD_BRW_NOCACHE;
}
@@ -1562,7 +1562,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
* of queued writes and create a discontiguous rpc stream
*/
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
- cli->cl_dirty_max < PAGE_CACHE_SIZE ||
+ cli->cl_dirty_max < PAGE_SIZE ||
cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
rc = -EDQUOT;
goto out;
@@ -1632,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli)
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
- if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
+ if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
(atomic_read(&obd_dirty_pages) + 1 >
obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index d720b1a1c18c..ce9ddd515f64 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -410,7 +410,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
int result;
opg->ops_from = 0;
- opg->ops_to = PAGE_CACHE_SIZE;
+ opg->ops_to = PAGE_SIZE;
result = osc_prep_async_page(osc, opg, vmpage,
cl_offset(obj, page->cp_index));
@@ -487,9 +487,9 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and..
*/
-static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
+static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
/* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
+static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
* we should free slots aggressively. In this way, slots are freed in a steady
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 74805f1ae888..30526ebcad04 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -826,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
oa->o_undirty = 0;
} else {
long max_in_flight = (cli->cl_max_pages_per_rpc <<
- PAGE_CACHE_SHIFT)*
+ PAGE_SHIFT)*
(cli->cl_max_rpcs_in_flight + 1);
oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
}
@@ -909,11 +909,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
static int osc_shrink_grant(struct client_obd *cli)
{
__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
- (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
+ (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
client_obd_list_lock(&cli->cl_loi_list_lock);
if (cli->cl_avail_grant <= target_bytes)
- target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
client_obd_list_unlock(&cli->cl_loi_list_lock);
return osc_shrink_grant_to_target(cli, target_bytes);
@@ -929,8 +929,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
* We don't want to shrink below a single RPC, as that will negatively
* impact block allocation and long-term performance.
*/
- if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
- target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
+ target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
if (target_bytes >= cli->cl_avail_grant) {
client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -978,7 +978,7 @@ static int osc_should_shrink_grant(struct client_obd *client)
* cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
* Keep comment here so that it can be found by searching.
*/
- int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
client->cl_avail_grant > brw_size)
@@ -1052,7 +1052,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
}
/* determine the appropriate chunk size used by osc_extent. */
- cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
+ cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
client_obd_list_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
@@ -1317,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
LASSERT(pg->count > 0);
/* make sure there is no gap in the middle of page array */
LASSERTF(page_count == 1 ||
- (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
+ (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
ergo(i > 0 && i < page_count - 1,
- poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
+ poff == 0 && pg->count == PAGE_SIZE) &&
ergo(i == page_count - 1, poff == 0)),
"i: %d/%d pg: %p off: %llu, count: %u\n",
i, page_count, pg, pg->off, pg->count);
@@ -1877,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
oap->oap_count;
else
LASSERT(oap->oap_page_off + oap->oap_count ==
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
}
}
@@ -1993,7 +1993,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
tmp->oap_request = ptlrpc_request_addref(req);
client_obd_list_lock(&cli->cl_loi_list_lock);
- starting_offset >>= PAGE_CACHE_SHIFT;
+ starting_offset >>= PAGE_SHIFT;
if (cmd == OBD_BRW_READ) {
cli->cl_r_in_flight++;
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@@ -2790,12 +2790,12 @@ out:
CFS_PAGE_MASK;
if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
- fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
+ fm_key->fiemap.fm_start + PAGE_SIZE - 1)
policy.l_extent.end = OBD_OBJECT_EOF;
else
policy.l_extent.end = (fm_key->fiemap.fm_start +
fm_key->fiemap.fm_length +
- PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
+ PAGE_SIZE - 1) & CFS_PAGE_MASK;
ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 1b7673eec4d7..cf3ac8eee9ee 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -174,12 +174,12 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
LASSERT(page);
LASSERT(pageoffset >= 0);
LASSERT(len > 0);
- LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+ LASSERT(pageoffset + len <= PAGE_SIZE);
desc->bd_nob += len;
if (pin)
- page_cache_get(page);
+ get_page(page);
ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
@@ -206,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
if (unpin) {
for (i = 0; i < desc->bd_iov_count; i++)
- page_cache_release(desc->bd_iov[i].kiov_page);
+ put_page(desc->bd_iov[i].kiov_page);
}
kfree(desc);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index b4eddf291269..cd94fed0ffdf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1092,7 +1092,7 @@ finish:
if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
cli->cl_max_pages_per_rpc =
- min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
+ min(ocd->ocd_brw_size >> PAGE_SHIFT,
cli->cl_max_pages_per_rpc);
else if (imp->imp_connect_op == MDS_CONNECT ||
imp->imp_connect_op == MGS_CONNECT)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index cee04efb6fb5..c95a91ce26c9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -308,7 +308,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
* hose a kernel by allowing the request history to grow too
* far.
*/
- bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (val > totalram_pages / (2 * bufpages))
return -ERANGE;
@@ -1226,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer,
const char prefix[] = "connection=";
const int prefix_len = sizeof(prefix) - 1;
- if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
+ if (count > PAGE_SIZE - 1 || count <= prefix_len)
return -EINVAL;
kbuf = kzalloc(count + 1, GFP_NOFS);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 5f27d9c2e4ef..30d9a164e52d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -195,7 +195,7 @@ int ptlrpc_resend(struct obd_import *imp)
}
list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
- LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
+ LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
"req %p bad\n", req);
LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
if (!ptlrpc_no_resend(req))
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 72d5b9bf5b29..d3872b8c9a6e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -58,7 +58,7 @@
* bulk encryption page pools *
****************************************/
-#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
+#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *))
#define PAGES_PER_POOL (POINTERS_PER_PAGE)
#define IDLE_IDX_MAX (100)
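
The PAGE_CACHE_* conversions in the Lustre hunks above are mechanical: before their removal, these macros were plain aliases in include/linux/pagemap.h (as of v4.5), roughly:

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
	#define page_cache_get(page)	get_page(page)
	#define page_cache_release(page)	put_page(page)

so each replacement (PAGE_CACHE_SHIFT -> PAGE_SHIFT, page_cache_get -> get_page, page_cache_release -> put_page) preserves the generated code exactly.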
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
new file mode 100644
index 000000000000..d277f048789e
--- /dev/null
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -0,0 +1,35 @@
+config FB_OLPC_DCON
+ tristate "One Laptop Per Child Display CONtroller support"
+ depends on OLPC && FB
+ depends on I2C
+ depends on (GPIO_CS5535 || GPIO_CS5535=n)
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+ In order to support very low power operation, the XO laptop uses a
+ secondary Display CONtroller, or DCON. This secondary controller
+ is present in the video pipeline between the primary display
+ controller (integrated into the processor or chipset) and the LCD
+ panel. It allows the main processor/display controller to be
+ completely powered off while still retaining an image on the display.
+ This controller is only available on OLPC platforms. Unless you have
+ one of these platforms, you will want to say 'N'.
+
+config FB_OLPC_DCON_1
+ bool "OLPC XO-1 DCON support"
+ depends on FB_OLPC_DCON && GPIO_CS5535
+ default y
+ ---help---
+ Enable support for the DCON in XO-1 model laptops. The kernel
+ communicates with the DCON using model-specific code. If you
+ have an XO-1 (or if you're unsure what model you have), you should
+ say 'Y'.
+
+config FB_OLPC_DCON_1_5
+ bool "OLPC XO-1.5 DCON support"
+ depends on FB_OLPC_DCON && ACPI
+ default y
+ ---help---
+ Enable support for the DCON in XO-1.5 model laptops. The kernel
+ communicates with the DCON using model-specific code. If you
+ have an XO-1.5 (or if you're unsure what model you have), you
+ should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
new file mode 100644
index 000000000000..36c7e67fec20
--- /dev/null
+++ b/drivers/staging/olpc_dcon/Makefile
@@ -0,0 +1,6 @@
+olpc-dcon-objs += olpc_dcon.o
+olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o
+olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o
+obj-$(CONFIG_FB_OLPC_DCON) += olpc-dcon.o
+
+
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
new file mode 100644
index 000000000000..61c2e65ac354
--- /dev/null
+++ b/drivers/staging/olpc_dcon/TODO
@@ -0,0 +1,9 @@
+TODO:
+ - see if vx855 gpio API can be made similar enough to cs5535 so we can
+ share more code
+ - allow simultaneous XO-1 and XO-1.5 support
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+copy:
+ Daniel Drake <dsd@laptop.org>
+ Jens Frederich <jfrederich@gmail.com>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
new file mode 100644
index 000000000000..f45b2ef05f48
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -0,0 +1,813 @@
+/*
+ * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
+ *
+ * Copyright © 2006-2007 Red Hat, Inc.
+ * Copyright © 2006-2007 Advanced Micro Devices, Inc.
+ * Copyright © 2009 VIA Technology, Inc.
+ * Copyright (c) 2010-2011 Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software. You can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/fb.h>
+#include <linux/console.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/backlight.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/reboot.h>
+#include <linux/olpc-ec.h>
+#include <asm/tsc.h>
+#include <asm/olpc.h>
+
+#include "olpc_dcon.h"
+
+/* Module definitions */
+
+static ushort resumeline = 898;
+module_param(resumeline, ushort, 0444);
+
+static struct dcon_platform_data *pdata;
+
+/* I2C structures */
+
+/* Platform devices */
+static struct platform_device *dcon_device;
+
+static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END };
+
+static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val)
+{
+ return i2c_smbus_write_word_data(dcon->client, reg, val);
+}
+
+static s32 dcon_read(struct dcon_priv *dcon, u8 reg)
+{
+ return i2c_smbus_read_word_data(dcon->client, reg);
+}
+
+/* ===== API functions - these are called by a variety of users ==== */
+
+static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
+{
+ u16 ver;
+ int rc = 0;
+
+ ver = dcon_read(dcon, DCON_REG_ID);
+ if ((ver >> 8) != 0xDC) {
+ pr_err("DCON ID not 0xDCxx: 0x%04x instead.\n", ver);
+ rc = -ENXIO;
+ goto err;
+ }
+
+ if (is_init) {
+ pr_info("Discovered DCON version %x\n", ver & 0xFF);
+ rc = pdata->init(dcon);
+ if (rc != 0) {
+ pr_err("Unable to init.\n");
+ goto err;
+ }
+ }
+
+ if (ver < 0xdc02) {
+ dev_err(&dcon->client->dev,
+ "DCON v1 is unsupported, giving up..\n");
+ rc = -ENODEV;
+ goto err;
+ }
+
+ /* SDRAM setup/hold time */
+ dcon_write(dcon, 0x3a, 0xc040);
+ dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000); /* clear option bits */
+ dcon_write(dcon, DCON_REG_MEM_OPT_A,
+ MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN);
+ dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET);
+
+ /* Colour swizzle, AA, no passthrough, backlight */
+ if (is_init) {
+ dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE |
+ MODE_CSWIZZLE | MODE_COL_AA;
+ }
+ dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+
+ /* Set the scanline to interrupt on during resume */
+ dcon_write(dcon, DCON_REG_SCAN_INT, resumeline);
+
+err:
+ return rc;
+}
+
+/*
+ * The smbus doesn't always come back due to what is believed to be
+ * hardware (power rail) bugs. For older models where this is known to
+ * occur, our solution is to attempt to wait for the bus to stabilize;
+ * if it doesn't happen, cut power to the dcon, repower it, and wait
+ * for the bus to stabilize. Rinse, repeat until we have a working
+ * smbus. For newer models, we simply BUG(); we want to know if this
+ * still happens despite the power fixes that have been made!
+ */
+static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
+{
+ unsigned long timeout;
+ u8 pm;
+ int x;
+
+power_up:
+ if (is_powered_down) {
+ pm = 1;
+ x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
+ if (x) {
+ pr_warn("unable to force dcon to power up: %d!\n", x);
+ return x;
+ }
+ usleep_range(10000, 11000); /* we'll be conservative */
+ }
+
+ pdata->bus_stabilize_wiggle();
+
+ for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
+ usleep_range(1000, 1100);
+ x = dcon_read(dcon, DCON_REG_ID);
+ }
+ if (x < 0) {
+ pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n");
+ BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
+ pm = 0;
+ olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
+ msleep(100);
+ is_powered_down = 1;
+ goto power_up; /* argh, stupid hardware.. */
+ }
+
+ if (is_powered_down)
+ return dcon_hw_init(dcon, 0);
+ return 0;
+}
+
+static void dcon_set_backlight(struct dcon_priv *dcon, u8 level)
+{
+ dcon->bl_val = level;
+ dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val);
+
+ /* Purposely turn off the backlight when we go to level 0 */
+ if (dcon->bl_val == 0) {
+ dcon->disp_mode &= ~MODE_BL_ENABLE;
+ dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+ } else if (!(dcon->disp_mode & MODE_BL_ENABLE)) {
+ dcon->disp_mode |= MODE_BL_ENABLE;
+ dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+ }
+}
+
+/* Set the output type to either color or mono */
+static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono)
+{
+ if (dcon->mono == enable_mono)
+ return 0;
+
+ dcon->mono = enable_mono;
+
+ if (enable_mono) {
+ dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA);
+ dcon->disp_mode |= MODE_MONO_LUMA;
+ } else {
+ dcon->disp_mode &= ~(MODE_MONO_LUMA);
+ dcon->disp_mode |= MODE_CSWIZZLE | MODE_COL_AA;
+ }
+
+ dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+ return 0;
+}
+
+/* For now, this will be really stupid - we need to address how
+ * DCONLOAD works in a sleep and account for it accordingly
+ */
+
+static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
+{
+ int x;
+
+ /* Turn off the backlight and put the DCON to sleep */
+
+ if (dcon->asleep == sleep)
+ return;
+
+ if (!olpc_board_at_least(olpc_board(0xc2)))
+ return;
+
+ if (sleep) {
+ u8 pm = 0;
+
+ x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
+ if (x)
+ pr_warn("unable to force dcon to power down: %d!\n", x);
+ else
+ dcon->asleep = sleep;
+ } else {
+ /* Only re-enable the backlight if the backlight value is set */
+ if (dcon->bl_val != 0)
+ dcon->disp_mode |= MODE_BL_ENABLE;
+ x = dcon_bus_stabilize(dcon, 1);
+ if (x)
+ pr_warn("unable to reinit dcon hardware: %d!\n", x);
+ else
+ dcon->asleep = sleep;
+
+ /* Restore backlight */
+ dcon_set_backlight(dcon, dcon->bl_val);
+ }
+
+ /* We should turn off some stuff in the framebuffer - but what? */
+}
+
+/* the DCON seems to get confused if we change DCONLOAD too
+ * frequently -- i.e., approximately faster than frame time.
+ * normally we don't change it this fast, so in general we won't
+ * delay here.
+ */
+static void dcon_load_holdoff(struct dcon_priv *dcon)
+{
+ ktime_t delta_t, now;
+
+ while (1) {
+ now = ktime_get();
+ delta_t = ktime_sub(now, dcon->load_time);
+ if (ktime_to_ns(delta_t) > NSEC_PER_MSEC * 20)
+ break;
+ mdelay(4);
+ }
+}
+
+static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
+{
+ int err;
+
+ console_lock();
+ if (!lock_fb_info(dcon->fbinfo)) {
+ console_unlock();
+ dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
+ return false;
+ }
+
+ dcon->ignore_fb_events = true;
+ err = fb_blank(dcon->fbinfo,
+ blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
+ dcon->ignore_fb_events = false;
+ unlock_fb_info(dcon->fbinfo);
+ console_unlock();
+
+ if (err) {
+ dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
+ blank ? "" : "un");
+ return false;
+ }
+ return true;
+}
+
+/* Set the source of the display (CPU or DCON) */
+static void dcon_source_switch(struct work_struct *work)
+{
+ struct dcon_priv *dcon = container_of(work, struct dcon_priv,
+ switch_source);
+ int source = dcon->pending_src;
+
+ if (dcon->curr_src == source)
+ return;
+
+ dcon_load_holdoff(dcon);
+
+ dcon->switched = false;
+
+ switch (source) {
+ case DCON_SOURCE_CPU:
+ pr_info("dcon_source_switch to CPU\n");
+ /* Enable the scanline interrupt bit */
+ if (dcon_write(dcon, DCON_REG_MODE,
+ dcon->disp_mode | MODE_SCAN_INT))
+ pr_err("couldn't enable scanline interrupt!\n");
+ else
+ /* Wait up to one second for the scanline interrupt */
+ wait_event_timeout(dcon->waitq, dcon->switched, HZ);
+
+ if (!dcon->switched)
+ pr_err("Timeout entering CPU mode; expect a screen glitch.\n");
+
+ /* Turn off the scanline interrupt */
+ if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode))
+ pr_err("couldn't disable scanline interrupt!\n");
+
+ /*
+ * Ideally we'd like to disable interrupts here so that the
+ * fb unblanking and DCON turn on happen at a known time value;
+ * however, we can't do that right now with fb_blank
+ * messing with semaphores.
+ *
+ * For now, we just hope..
+ */
+ if (!dcon_blank_fb(dcon, false)) {
+ pr_err("Failed to enter CPU mode\n");
+ dcon->pending_src = DCON_SOURCE_DCON;
+ return;
+ }
+
+ /* And turn off the DCON */
+ pdata->set_dconload(1);
+ dcon->load_time = ktime_get();
+
+ pr_info("The CPU has control\n");
+ break;
+ case DCON_SOURCE_DCON:
+ {
+ ktime_t delta_t;
+
+ pr_info("dcon_source_switch to DCON\n");
+
+ /* Clear DCONLOAD - this implies that the DCON is in control */
+ pdata->set_dconload(0);
+ dcon->load_time = ktime_get();
+
+ wait_event_timeout(dcon->waitq, dcon->switched, HZ/2);
+
+ if (!dcon->switched) {
+ pr_err("Timeout entering DCON mode; expect a screen glitch.\n");
+ } else {
+ /* sometimes the DCON doesn't follow its own rules,
+ * and doesn't wait for two vsync pulses before
+ * ack'ing the frame load with an IRQ. the result
+ * is that the display shows the *previously*
+ * loaded frame. we can detect this by looking at
+ * the time between asserting DCONLOAD and the IRQ --
+ * if it's less than 20msec, then the DCON couldn't
+ * have seen two VSYNC pulses. in that case we
+ * deassert and reassert, and hope for the best.
+ * see http://dev.laptop.org/ticket/9664
+ */
+ delta_t = ktime_sub(dcon->irq_time, dcon->load_time);
+ if (dcon->switched && ktime_to_ns(delta_t)
+ < NSEC_PER_MSEC * 20) {
+ pr_err("missed loading, retrying\n");
+ pdata->set_dconload(1);
+ mdelay(41);
+ pdata->set_dconload(0);
+ dcon->load_time = ktime_get();
+ mdelay(41);
+ }
+ }
+
+ dcon_blank_fb(dcon, true);
+ pr_info("The DCON has control\n");
+ break;
+ }
+ default:
+ BUG();
+ }
+
+ dcon->curr_src = source;
+}
+
+static void dcon_set_source(struct dcon_priv *dcon, int arg)
+{
+ if (dcon->pending_src == arg)
+ return;
+
+ dcon->pending_src = arg;
+
+ if (dcon->curr_src != arg)
+ schedule_work(&dcon->switch_source);
+}
+
+static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
+{
+ dcon_set_source(dcon, arg);
+ flush_scheduled_work();
+}
+
+static ssize_t dcon_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%4.4X\n", dcon->disp_mode);
+}
+
+static ssize_t dcon_sleep_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dcon->asleep);
+}
+
+static ssize_t dcon_freeze_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0);
+}
+
+static ssize_t dcon_mono_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dcon->mono);
+}
+
+static ssize_t dcon_resumeline_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", resumeline);
+}
+
+static ssize_t dcon_mono_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long enable_mono;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &enable_mono);
+ if (rc)
+ return rc;
+
+ dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false);
+
+ return count;
+}
+
+static ssize_t dcon_freeze_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dcon_priv *dcon = dev_get_drvdata(dev);
+ unsigned long output;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &output);
+ if (ret)
+ return ret;
+
+ pr_info("dcon_freeze_store: %lu\n", output);
+
+ switch (output) {
+ case 0:
+ dcon_set_source(dcon, DCON_SOURCE_CPU);
+ break;
+ case 1:
+ dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
+ break;
+ case 2: /* normally unused */
+ dcon_set_source(dcon, DCON_SOURCE_DCON);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t dcon_resumeline_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned short rl;
+ int rc;
+
+ rc = kstrtou16(buf, 10, &rl);
+ if (rc)
+ return rc;
+
+ resumeline = rl;
+ dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline);
+
+ return count;
+}
+
+static ssize_t dcon_sleep_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long output;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &output);
+ if (ret)
+ return ret;
+
+ dcon_sleep(dev_get_drvdata(dev), output ? true : false);
+ return count;
+}
+
+static struct device_attribute dcon_device_files[] = {
+ __ATTR(mode, 0444, dcon_mode_show, NULL),
+ __ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store),
+ __ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store),
+ __ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store),
+ __ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
+};
+
+static int dcon_bl_update(struct backlight_device *dev)
+{
+ struct dcon_priv *dcon = bl_get_data(dev);
+ u8 level = dev->props.brightness & 0x0F;
+
+ if (dev->props.power != FB_BLANK_UNBLANK)
+ level = 0;
+
+ if (level != dcon->bl_val)
+ dcon_set_backlight(dcon, level);
+
+ /* power down the DCON when the screen is blanked */
+ if (!dcon->ignore_fb_events)
+ dcon_sleep(dcon, !!(dev->props.state & BL_CORE_FBBLANK));
+
+ return 0;
+}
+
+static int dcon_bl_get(struct backlight_device *dev)
+{
+ struct dcon_priv *dcon = bl_get_data(dev);
+
+ return dcon->bl_val;
+}
+
+static const struct backlight_ops dcon_bl_ops = {
+ .update_status = dcon_bl_update,
+ .get_brightness = dcon_bl_get,
+};
+
+static struct backlight_properties dcon_bl_props = {
+ .max_brightness = 15,
+ .type = BACKLIGHT_RAW,
+ .power = FB_BLANK_UNBLANK,
+};
+
+static int dcon_reboot_notify(struct notifier_block *nb,
+ unsigned long foo, void *bar)
+{
+ struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb);
+
+ if (!dcon || !dcon->client)
+ return NOTIFY_DONE;
+
+ /* Turn off the DCON. Entirely. */
+ dcon_write(dcon, DCON_REG_MODE, 0x39);
+ dcon_write(dcon, DCON_REG_MODE, 0x32);
+ return NOTIFY_DONE;
+}
+
+static int unfreeze_on_panic(struct notifier_block *nb,
+ unsigned long e, void *p)
+{
+ pdata->set_dconload(1);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block dcon_panic_nb = {
+ .notifier_call = unfreeze_on_panic,
+};
+
+static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct dcon_priv *dcon;
+ int rc, i, j;
+
+ if (!pdata)
+ return -ENXIO;
+
+ dcon = kzalloc(sizeof(*dcon), GFP_KERNEL);
+ if (!dcon)
+ return -ENOMEM;
+
+ dcon->client = client;
+ init_waitqueue_head(&dcon->waitq);
+ INIT_WORK(&dcon->switch_source, dcon_source_switch);
+ dcon->reboot_nb.notifier_call = dcon_reboot_notify;
+ dcon->reboot_nb.priority = -1;
+
+ i2c_set_clientdata(client, dcon);
+
+ if (num_registered_fb < 1) {
+ dev_err(&client->dev, "DCON driver requires a registered fb\n");
+ rc = -EIO;
+ goto einit;
+ }
+ dcon->fbinfo = registered_fb[0];
+
+ rc = dcon_hw_init(dcon, 1);
+ if (rc)
+ goto einit;
+
+ /* Add the DCON device */
+
+ dcon_device = platform_device_alloc("dcon", -1);
+
+ if (!dcon_device) {
+ pr_err("Unable to create the DCON device\n");
+ rc = -ENOMEM;
+ goto eirq;
+ }
+ rc = platform_device_add(dcon_device);
+ platform_set_drvdata(dcon_device, dcon);
+
+ if (rc) {
+ pr_err("Unable to add the DCON device\n");
+ goto edev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) {
+ rc = device_create_file(&dcon_device->dev,
+ &dcon_device_files[i]);
+ if (rc) {
+ dev_err(&dcon_device->dev, "Cannot create sysfs file\n");
+ goto ecreate;
+ }
+ }
+
+ dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F;
+
+ /* Add the backlight device for the DCON */
+ dcon_bl_props.brightness = dcon->bl_val;
+ dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev,
+ dcon, &dcon_bl_ops, &dcon_bl_props);
+ if (IS_ERR(dcon->bl_dev)) {
+ dev_err(&client->dev, "cannot register backlight dev (%ld)\n",
+ PTR_ERR(dcon->bl_dev));
+ dcon->bl_dev = NULL;
+ }
+
+ register_reboot_notifier(&dcon->reboot_nb);
+ atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb);
+
+ return 0;
+
+ ecreate:
+ for (j = 0; j < i; j++)
+ device_remove_file(&dcon_device->dev, &dcon_device_files[j]);
+ edev:
+ platform_device_unregister(dcon_device);
+ dcon_device = NULL;
+ eirq:
+ free_irq(DCON_IRQ, dcon);
+ einit:
+ kfree(dcon);
+ return rc;
+}
+
+static int dcon_remove(struct i2c_client *client)
+{
+ struct dcon_priv *dcon = i2c_get_clientdata(client);
+
+ unregister_reboot_notifier(&dcon->reboot_nb);
+ atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb);
+
+ free_irq(DCON_IRQ, dcon);
+
+ backlight_device_unregister(dcon->bl_dev);
+
+ if (dcon_device)
+ platform_device_unregister(dcon_device);
+ cancel_work_sync(&dcon->switch_source);
+
+ kfree(dcon);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int dcon_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dcon_priv *dcon = i2c_get_clientdata(client);
+
+ if (!dcon->asleep) {
+ /* Set up the DCON to have the source */
+ dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
+ }
+
+ return 0;
+}
+
+static int dcon_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dcon_priv *dcon = i2c_get_clientdata(client);
+
+ if (!dcon->asleep) {
+ dcon_bus_stabilize(dcon, 0);
+ dcon_set_source(dcon, DCON_SOURCE_CPU);
+ }
+
+ return 0;
+}
+
+#else
+
+#define dcon_suspend NULL
+#define dcon_resume NULL
+
+#endif /* CONFIG_PM */
+
+irqreturn_t dcon_interrupt(int irq, void *id)
+{
+ struct dcon_priv *dcon = id;
+ u8 status;
+
+ if (pdata->read_status(&status))
+ return IRQ_NONE;
+
+ switch (status & 3) {
+ case 3:
+ pr_debug("DCONLOAD_MISSED interrupt\n");
+ break;
+
+ case 2: /* switch to DCON mode */
+ case 1: /* switch to CPU mode */
+ dcon->switched = true;
+ dcon->irq_time = ktime_get();
+ wake_up(&dcon->waitq);
+ break;
+
+ case 0:
+ /* workaround resume case: the DCON (on 1.5) doesn't
+ * ever assert status 0x01 when switching to CPU mode
+ * during resume. this is because DCONLOAD is de-asserted
+ * _immediately_ upon exiting S3, so the actual release
+ * of the DCON happened long before this point.
+ * see http://dev.laptop.org/ticket/9869
+ */
+ if (dcon->curr_src != dcon->pending_src && !dcon->switched) {
+ dcon->switched = true;
+ dcon->irq_time = ktime_get();
+ wake_up(&dcon->waitq);
+ pr_debug("switching w/ status 0/0\n");
+ } else {
+ pr_debug("scanline interrupt w/CPU\n");
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct dev_pm_ops dcon_pm_ops = {
+ .suspend = dcon_suspend,
+ .resume = dcon_resume,
+};
+
+static const struct i2c_device_id dcon_idtable[] = {
+ { "olpc_dcon", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, dcon_idtable);
+
+static struct i2c_driver dcon_driver = {
+ .driver = {
+ .name = "olpc_dcon",
+ .pm = &dcon_pm_ops,
+ },
+ .class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
+ .id_table = dcon_idtable,
+ .probe = dcon_probe,
+ .remove = dcon_remove,
+ .detect = dcon_detect,
+ .address_list = normal_i2c,
+};
+
+static int __init olpc_dcon_init(void)
+{
+#ifdef CONFIG_FB_OLPC_DCON_1_5
+ /* XO-1.5 */
+ if (olpc_board_at_least(olpc_board(0xd0)))
+ pdata = &dcon_pdata_xo_1_5;
+#endif
+#ifdef CONFIG_FB_OLPC_DCON_1
+ if (!pdata)
+ pdata = &dcon_pdata_xo_1;
+#endif
+
+ return i2c_add_driver(&dcon_driver);
+}
+
+static void __exit olpc_dcon_exit(void)
+{
+ i2c_del_driver(&dcon_driver);
+}
+
+module_init(olpc_dcon_init);
+module_exit(olpc_dcon_exit);
+
+MODULE_LICENSE("GPL");
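
The attributes registered via dcon_device_files above are ordinary sysfs files. A minimal userspace sketch, assuming the platform device is exposed at /sys/devices/platform/dcon (the exact path is not spelled out in the patch), could hand control of the panel to the DCON through the "freeze" attribute:

	/* Hypothetical userspace example -- not part of the patch. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Values accepted by dcon_freeze_store(): 0 = CPU source,
		 * 1 = DCON source (synchronous), 2 = DCON source (async).
		 */
		int fd = open("/sys/devices/platform/dcon/freeze", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}

Writing "0" back switches the source to the CPU again, mirroring dcon_set_source(dcon, DCON_SOURCE_CPU) in the driver.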
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
new file mode 100644
index 000000000000..215e7ec4dea2
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -0,0 +1,111 @@
+#ifndef OLPC_DCON_H_
+#define OLPC_DCON_H_
+
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+
+/* DCON registers */
+
+#define DCON_REG_ID 0
+#define DCON_REG_MODE 1
+
+#define MODE_PASSTHRU (1<<0)
+#define MODE_SLEEP (1<<1)
+#define MODE_SLEEP_AUTO (1<<2)
+#define MODE_BL_ENABLE (1<<3)
+#define MODE_BLANK (1<<4)
+#define MODE_CSWIZZLE (1<<5)
+#define MODE_COL_AA (1<<6)
+#define MODE_MONO_LUMA (1<<7)
+#define MODE_SCAN_INT (1<<8)
+#define MODE_CLOCKDIV (1<<9)
+#define MODE_DEBUG (1<<14)
+#define MODE_SELFTEST (1<<15)
+
+#define DCON_REG_HRES 0x2
+#define DCON_REG_HTOTAL 0x3
+#define DCON_REG_HSYNC_WIDTH 0x4
+#define DCON_REG_VRES 0x5
+#define DCON_REG_VTOTAL 0x6
+#define DCON_REG_VSYNC_WIDTH 0x7
+#define DCON_REG_TIMEOUT 0x8
+#define DCON_REG_SCAN_INT 0x9
+#define DCON_REG_BRIGHT 0xa
+#define DCON_REG_MEM_OPT_A 0x41
+#define DCON_REG_MEM_OPT_B 0x42
+
+/* Load Delay Locked Loop (DLL) settings for clock delay */
+#define MEM_DLL_CLOCK_DELAY (1<<0)
+/* Memory controller power down function */
+#define MEM_POWER_DOWN (1<<8)
+/* Memory controller software reset */
+#define MEM_SOFT_RESET (1<<0)
+
+/* Status values */
+
+#define DCONSTAT_SCANINT 0
+#define DCONSTAT_SCANINT_DCON 1
+#define DCONSTAT_DISPLAYLOAD 2
+#define DCONSTAT_MISSED 3
+
+/* Source values */
+
+#define DCON_SOURCE_DCON 0
+#define DCON_SOURCE_CPU 1
+
+/* Interrupt */
+#define DCON_IRQ 6
+
+struct dcon_priv {
+ struct i2c_client *client;
+ struct fb_info *fbinfo;
+ struct backlight_device *bl_dev;
+
+ wait_queue_head_t waitq;
+ struct work_struct switch_source;
+ struct notifier_block reboot_nb;
+
+ /* Shadow register for the DCON_REG_MODE register */
+ u8 disp_mode;
+
+ /* The current backlight value - this saves us some smbus traffic */
+ u8 bl_val;
+
+ /* Current source, initialized at probe time */
+ int curr_src;
+
+ /* Desired source */
+ int pending_src;
+
+ /* Variables used during switches */
+ bool switched;
+ ktime_t irq_time;
+ ktime_t load_time;
+
+ /* Current output type; true == mono, false == color */
+ bool mono;
+ bool asleep;
+ /* This gets set while controlling fb blank state from the driver */
+ bool ignore_fb_events;
+};
+
+struct dcon_platform_data {
+ int (*init)(struct dcon_priv *);
+ void (*bus_stabilize_wiggle)(void);
+ void (*set_dconload)(int);
+ int (*read_status)(u8 *);
+};
+
+#include <linux/interrupt.h>
+
+irqreturn_t dcon_interrupt(int irq, void *id);
+
+#ifdef CONFIG_FB_OLPC_DCON_1
+extern struct dcon_platform_data dcon_pdata_xo_1;
+#endif
+
+#ifdef CONFIG_FB_OLPC_DCON_1_5
+extern struct dcon_platform_data dcon_pdata_xo_1_5;
+#endif
+
+#endif
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
new file mode 100644
index 000000000000..0c5a10c69401
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -0,0 +1,205 @@
+/*
+ * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
+ *
+ * Copyright © 2006-2007 Red Hat, Inc.
+ * Copyright © 2006-2007 Advanced Micro Devices, Inc.
+ * Copyright © 2009 VIA Technology, Inc.
+ * Copyright (c) 2010 Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software. You can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cs5535.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <asm/olpc.h>
+
+#include "olpc_dcon.h"
+
+static int dcon_init_xo_1(struct dcon_priv *dcon)
+{
+ unsigned char lob;
+
+ if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
+ pr_err("failed to request STAT0 GPIO\n");
+ return -EIO;
+ }
+ if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
+ pr_err("failed to request STAT1 GPIO\n");
+ goto err_gp_stat1;
+ }
+ if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
+ pr_err("failed to request IRQ GPIO\n");
+ goto err_gp_irq;
+ }
+ if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
+ pr_err("failed to request LOAD GPIO\n");
+ goto err_gp_load;
+ }
+ if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
+ pr_err("failed to request BLANK GPIO\n");
+ goto err_gp_blank;
+ }
+
+ /* Turn off the event enable for GPIO7 just to be safe */
+ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+
+ /*
+ * Determine the current state by reading the GPIO bit; earlier
+ * stages of the boot process have established the state.
+ *
+ * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
+ * this is because OFW will disable input for the pin and set a value..
+ * READ_BACK will only contain a valid value if input is enabled and
+ * then a value is set. So, future readings of the pin can use
+ * READ_BACK, but the first one cannot. Awesome, huh?
+ */
+ dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
+ ? DCON_SOURCE_CPU
+ : DCON_SOURCE_DCON;
+ dcon->pending_src = dcon->curr_src;
+
+ /* Set the directions for the GPIO pins */
+ gpio_direction_input(OLPC_GPIO_DCON_STAT0);
+ gpio_direction_input(OLPC_GPIO_DCON_STAT1);
+ gpio_direction_input(OLPC_GPIO_DCON_IRQ);
+ gpio_direction_input(OLPC_GPIO_DCON_BLANK);
+ gpio_direction_output(OLPC_GPIO_DCON_LOAD,
+ dcon->curr_src == DCON_SOURCE_CPU);
+
+ /* Set up the interrupt mappings */
+
+ /* Set the IRQ to pair 2 */
+ cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);
+
+ /* Enable group 2 to trigger the DCON interrupt */
+ cs5535_gpio_set_irq(2, DCON_IRQ);
+
+ /* Select edge level for interrupt (in PIC) */
+ lob = inb(0x4d0);
+ lob &= ~(1 << DCON_IRQ);
+ outb(lob, 0x4d0);
+
+ /* Register the interrupt handler */
+ if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) {
+ pr_err("failed to request DCON's irq\n");
+ goto err_req_irq;
+ }
+
+ /* Clear INV_EN for GPIO7 (DCONIRQ) */
+ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);
+
+ /* Enable filter for GPIO12 (DCONBLANK) */
+ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);
+
+ /* Disable filter for GPIO7 */
+ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);
+
+ /* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
+ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
+ cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);
+
+ /* Add GPIO12 to the Filter Event Pair #7 */
+ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);
+
+ /* Turn off negative Edge Enable for GPIO12 */
+ cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);
+
+ /* Enable negative Edge Enable for GPIO7 */
+ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);
+
+ /* Zero the filter amount for Filter Event Pair #7 */
+ cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);
+
+ /* Clear the negative edge status for GPIO7 and GPIO12 */
+ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);
+
+ /* FIXME: Clear the positive status as well, just to be sure */
+ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
+ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);
+
+ /* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
+ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);
+
+ return 0;
+
+err_req_irq:
+ gpio_free(OLPC_GPIO_DCON_BLANK);
+err_gp_blank:
+ gpio_free(OLPC_GPIO_DCON_LOAD);
+err_gp_load:
+ gpio_free(OLPC_GPIO_DCON_IRQ);
+err_gp_irq:
+ gpio_free(OLPC_GPIO_DCON_STAT1);
+err_gp_stat1:
+ gpio_free(OLPC_GPIO_DCON_STAT0);
+ return -EIO;
+}
+
+static void dcon_wiggle_xo_1(void)
+{
+ int x;
+
+ /*
+ * According to HiMax, when powering the DCON up we should hold
+ * SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON
+ * state machine to reset to a (sane) initial state. Mitch Bradley
+ * did some testing and discovered that holding for 16 SMB_CLK cycles
+ * worked a lot more reliably, so that's what we do here.
+ *
+ * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must
+ * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
+ * GPIO15.
+ */
+ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+ cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
+ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
+ cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+
+ for (x = 0; x < 16; x++) {
+ udelay(5);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+ udelay(5);
+ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+ }
+ udelay(5);
+ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+ cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+ cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+}
+
+static void dcon_set_dconload_1(int val)
+{
+ gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
+}
+
+static int dcon_read_status_xo_1(u8 *status)
+{
+ *status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+ *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+
+ /* Clear the negative edge status for GPIO7 */
+ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+
+ return 0;
+}
+
+struct dcon_platform_data dcon_pdata_xo_1 = {
+ .init = dcon_init_xo_1,
+ .bus_stabilize_wiggle = dcon_wiggle_xo_1,
+ .set_dconload = dcon_set_dconload_1,
+ .read_status = dcon_read_status_xo_1,
+};
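Editor's note: dcon_pdata_xo_1 above fills in a per-board ops table, and the DCON core is expected to dispatch through these function pointers rather than call the XO-1 routines directly. A minimal userspace sketch of that ops-table pattern follows; the names (board_ops, xo1_*) are illustrative stand-ins, not the actual olpc_dcon core code.

    #include <stdio.h>

    struct board_ops {
        int  (*init)(void);
        void (*set_load)(int val);
        int  (*read_status)(unsigned char *status);
    };

    static int xo1_init(void) { printf("init XO-1 GPIOs\n"); return 0; }
    static void xo1_set_load(int val) { printf("DCONLOAD <- %d\n", val); }
    static int xo1_read_status(unsigned char *s) { *s = 0x3; return 0; }

    static const struct board_ops xo1_ops = {
        .init        = xo1_init,
        .set_load    = xo1_set_load,
        .read_status = xo1_read_status,
    };

    int main(void)
    {
        unsigned char status;
        const struct board_ops *ops = &xo1_ops; /* picked per board at probe time */

        if (ops->init())
            return 1;
        ops->set_load(1);
        if (!ops->read_status(&status))
            printf("status = %#x\n", status);
        return 0;
    }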
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
new file mode 100644
index 000000000000..6a4d379c16a3
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2009,2010 One Laptop per Child
+ *
+ * This program is free software. You can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/olpc.h>
+
+/* TODO: this eventually belongs in linux/vx855.h */
+#define NR_VX855_GPI 14
+#define NR_VX855_GPO 13
+#define NR_VX855_GPIO 15
+
+#define VX855_GPI(n) (n)
+#define VX855_GPO(n) (NR_VX855_GPI + (n))
+#define VX855_GPIO(n) (NR_VX855_GPI + NR_VX855_GPO + (n))
+
+#include "olpc_dcon.h"
+
+/* Hardware setup on the XO 1.5:
+ * DCONLOAD connects to VX855_GPIO1 (not SMBCK2)
+ * DCONBLANK connects to VX855_GPIO8 (not SSPICLK) unused in driver
+ * DCONSTAT0 connects to VX855_GPI10 (not SSPISDI)
+ * DCONSTAT1 connects to VX855_GPI11 (not nSSPISS)
+ * DCONIRQ connects to VX855_GPIO12
+ * DCONSMBDATA connects to VX855 graphics CRTSPD
+ * DCONSMBCLK connects to VX855 graphics CRTSPCLK
+ */
+
+#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */
+#define VX855_GPI_STATUS_CHG 0x450 /* PMIO_Rx50 */
+#define VX855_GPI_SCI_SMI 0x452 /* PMIO_Rx52 */
+#define BIT_GPIO12 0x40
+
+#define PREFIX "OLPC DCON:"
+
+static void dcon_clear_irq(void)
+{
+ /* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */
+ outb(BIT_GPIO12, VX855_GPI_STATUS_CHG);
+}
+
+static int dcon_was_irq(void)
+{
+ u_int8_t tmp;
+
+ /* irq status will appear in PMIO_Rx50[6] on gpio12 */
+ tmp = inb(VX855_GPI_STATUS_CHG);
+	return !!(tmp & BIT_GPIO12);
+}
+
+static int dcon_init_xo_1_5(struct dcon_priv *dcon)
+{
+ unsigned int irq;
+
+ dcon_clear_irq();
+
+ /* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
+ outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
+
+ /* Determine the current state of DCONLOAD, likely set by firmware */
+ /* GPIO1 */
+ dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ?
+ DCON_SOURCE_CPU : DCON_SOURCE_DCON;
+ dcon->pending_src = dcon->curr_src;
+
+ /* we're sharing the IRQ with ACPI */
+ irq = acpi_gbl_FADT.sci_interrupt;
+ if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) {
+ pr_err("DCON (IRQ%d) allocation failed\n", irq);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void set_i2c_line(int sda, int scl)
+{
+ unsigned char tmp;
+ unsigned int port = 0x26;
+
+ /* FIXME: This directly accesses the CRT GPIO controller !!! */
+ outb(port, 0x3c4);
+ tmp = inb(0x3c5);
+
+ if (scl)
+ tmp |= 0x20;
+ else
+ tmp &= ~0x20;
+
+ if (sda)
+ tmp |= 0x10;
+ else
+ tmp &= ~0x10;
+
+ tmp |= 0x01;
+
+ outb(port, 0x3c4);
+ outb(tmp, 0x3c5);
+}
+
+
+static void dcon_wiggle_xo_1_5(void)
+{
+ int x;
+
+ /*
+ * According to HiMax, when powering the DCON up we should hold
+ * SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON
+ * state machine to reset to a (sane) initial state. Mitch Bradley
+ * did some testing and discovered that holding for 16 SMB_CLK cycles
+ * worked a lot more reliably, so that's what we do here.
+ */
+ set_i2c_line(1, 1);
+
+ for (x = 0; x < 16; x++) {
+ udelay(5);
+ set_i2c_line(1, 0);
+ udelay(5);
+ set_i2c_line(1, 1);
+ }
+ udelay(5);
+
+ /* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
+ outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
+}
+
+static void dcon_set_dconload_xo_1_5(int val)
+{
+ gpio_set_value(VX855_GPIO(1), val);
+}
+
+static int dcon_read_status_xo_1_5(u8 *status)
+{
+ if (!dcon_was_irq())
+ return -1;
+
+	/* I believe this is the same as "inb(0x44b) & 3" */
+ *status = gpio_get_value(VX855_GPI(10));
+ *status |= gpio_get_value(VX855_GPI(11)) << 1;
+
+ dcon_clear_irq();
+
+ return 0;
+}
+
+struct dcon_platform_data dcon_pdata_xo_1_5 = {
+ .init = dcon_init_xo_1_5,
+ .bus_stabilize_wiggle = dcon_wiggle_xo_1_5,
+ .set_dconload = dcon_set_dconload_xo_1_5,
+ .read_status = dcon_read_status_xo_1_5,
+};
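Editor's note: the GPIO12 status bit in PMIO_Rx50 used by dcon_clear_irq()/dcon_was_irq() behaves as a write-1-to-clear (RW1C) latch: reading reports whether the event fired, and writing a 1 to that bit position acknowledges it. A small userspace simulation of that semantic (the register is just a variable here, purely illustrative):

    #include <stdio.h>

    #define BIT_GPIO12 0x40

    static unsigned char status_reg;    /* stands in for PMIO_Rx50 */

    /* hardware latches an event */
    static void hw_raise_event(void) { status_reg |= BIT_GPIO12; }

    static int was_irq(void)
    {
        return !!(status_reg & BIT_GPIO12);
    }

    static void clear_irq(void)
    {
        /* model the effect of outb(BIT_GPIO12, status_port):
         * that bit is acknowledged, the other bits are untouched */
        status_reg &= ~BIT_GPIO12;
    }

    int main(void)
    {
        hw_raise_event();
        printf("pending: %d\n", was_irq());   /* 1 */
        clear_irq();
        printf("pending: %d\n", was_irq());   /* 0 */
        return 0;
    }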
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig
index 3e668d852f03..a925fb0db706 100644
--- a/drivers/staging/rdma/hfi1/Kconfig
+++ b/drivers/staging/rdma/hfi1/Kconfig
@@ -2,6 +2,7 @@ config INFINIBAND_HFI1
tristate "Intel OPA Gen1 support"
depends on X86_64 && INFINIBAND_RDMAVT
select MMU_NOTIFIER
+ select CRC32
default m
---help---
This is a low-level driver for Intel OPA Gen1 adapter.
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index a24443ba59ea..97e5b69e0668 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -779,14 +779,6 @@ static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
return 0;
}
-static void lio_target_cleanup_nodeacl( struct se_node_acl *se_nacl)
-{
- struct iscsi_node_acl *acl = container_of(se_nacl,
- struct iscsi_node_acl, se_node_acl);
-
- configfs_remove_default_groups(&acl->se_node_acl.acl_fabric_stat_group);
-}
-
/* End items for lio_target_acl_cit */
/* Start items for lio_target_tpg_attrib_cit */
@@ -1247,6 +1239,16 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
if (IS_ERR(tiqn))
return ERR_CAST(tiqn);
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+ " %s\n", name);
+ return &tiqn->tiqn_wwn;
+}
+
+static void lio_target_add_wwn_groups(struct se_wwn *wwn)
+{
+ struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
"iscsi_instance", &iscsi_stat_instance_cit);
configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group,
@@ -1271,12 +1273,6 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
"iscsi_logout_stats", &iscsi_stat_logout_cit);
configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
&tiqn->tiqn_wwn.fabric_stat_group);
-
-
- pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
- pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
- " %s\n", name);
- return &tiqn->tiqn_wwn;
}
static void lio_target_call_coredeltiqn(
@@ -1284,8 +1280,6 @@ static void lio_target_call_coredeltiqn(
{
struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
- configfs_remove_default_groups(&tiqn->tiqn_wwn.fabric_stat_group);
-
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
tiqn->tiqn);
iscsit_del_tiqn(tiqn);
@@ -1660,12 +1654,12 @@ const struct target_core_fabric_ops iscsi_ops = {
.aborted_task = lio_aborted_task,
.fabric_make_wwn = lio_target_call_coreaddtiqn,
.fabric_drop_wwn = lio_target_call_coredeltiqn,
+ .add_wwn_groups = lio_target_add_wwn_groups,
.fabric_make_tpg = lio_target_tiqn_addtpg,
.fabric_drop_tpg = lio_target_tiqn_deltpg,
.fabric_make_np = lio_target_call_addnptotpg,
.fabric_drop_np = lio_target_call_delnpfromtpg,
.fabric_init_nodeacl = lio_target_init_nodeacl,
- .fabric_cleanup_nodeacl = lio_target_cleanup_nodeacl,
.tfc_discovery_attrs = lio_target_discovery_auth_attrs,
.tfc_wwn_attrs = lio_target_wwn_attrs,
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 1bd5c72b663e..31a096aa16ab 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -338,10 +338,8 @@ static void target_fabric_nacl_base_release(struct config_item *item)
{
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
- struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf;
- if (tf->tf_ops->fabric_cleanup_nodeacl)
- tf->tf_ops->fabric_cleanup_nodeacl(se_nacl);
+ configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
core_tpg_del_initiator_node_acl(se_nacl);
}
@@ -383,14 +381,6 @@ static struct config_group *target_fabric_make_nodeacl(
if (IS_ERR(se_nacl))
return ERR_CAST(se_nacl);
- if (tf->tf_ops->fabric_init_nodeacl) {
- int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
- if (ret) {
- core_tpg_del_initiator_node_acl(se_nacl);
- return ERR_PTR(ret);
- }
- }
-
config_group_init_type_name(&se_nacl->acl_group, name,
&tf->tf_tpg_nacl_base_cit);
@@ -414,6 +404,15 @@ static struct config_group *target_fabric_make_nodeacl(
configfs_add_default_group(&se_nacl->acl_fabric_stat_group,
&se_nacl->acl_group);
+ if (tf->tf_ops->fabric_init_nodeacl) {
+ int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
+ if (ret) {
+ configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
+ core_tpg_del_initiator_node_acl(se_nacl);
+ return ERR_PTR(ret);
+ }
+ }
+
return &se_nacl->acl_group;
}
@@ -892,6 +891,7 @@ static void target_fabric_release_wwn(struct config_item *item)
struct se_wwn, wwn_group);
struct target_fabric_configfs *tf = wwn->wwn_tf;
+ configfs_remove_default_groups(&wwn->fabric_stat_group);
tf->tf_ops->fabric_drop_wwn(wwn);
}
@@ -945,6 +945,8 @@ static struct config_group *target_fabric_make_wwn(
&tf->tf_wwn_fabric_stats_cit);
configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
+ if (tf->tf_ops->add_wwn_groups)
+ tf->tf_ops->add_wwn_groups(wwn);
return &wwn->wwn_group;
}
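Editor's note: the reordering above calls fabric_init_nodeacl() only after the default groups are registered, and on failure removes those groups before deleting the ACL, so teardown mirrors setup in reverse. A compact, self-contained sketch of that goto-based unwinding pattern (the helper names are toy stand-ins, not the configfs code):

    #include <stdio.h>

    static int add_default_groups(void) { puts("groups added"); return 0; }
    static void remove_default_groups(void) { puts("groups removed"); }
    static int fabric_init(int fail) { return fail ? -1 : 0; }
    static void delete_acl(void) { puts("acl deleted"); }

    static int make_nodeacl(int make_fabric_init_fail)
    {
        int ret;

        ret = add_default_groups();
        if (ret)
            goto out_del_acl;

        ret = fabric_init(make_fabric_init_fail);
        if (ret)
            goto out_remove_groups;

        return 0;

    out_remove_groups:
        remove_default_groups();    /* undo the later step first */
    out_del_acl:
        delete_acl();               /* then the earlier allocation */
        return ret;
    }

    int main(void)
    {
        make_nodeacl(1);    /* exercise the error path */
        return 0;
    }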
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8d26ed79bb4c..9b04d72e752e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2049,14 +2049,13 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
if (tty) {
mutex_unlock(&tty_mutex);
retval = tty_lock_interruptible(tty);
+ tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
if (retval) {
if (retval == -EINTR)
retval = -ERESTARTSYS;
tty = ERR_PTR(retval);
goto out;
}
- /* safe to drop the kref from tty_driver_lookup_tty() */
- tty_kref_put(tty);
retval = tty_reopen(tty);
if (retval < 0) {
tty_unlock(tty);
@@ -2158,7 +2157,7 @@ retry_open:
read_lock(&tasklist_lock);
spin_lock_irq(&current->sighand->siglock);
noctty = (filp->f_flags & O_NOCTTY) ||
- device == MKDEV(TTY_MAJOR, 0) ||
+ (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
device == MKDEV(TTYAUX_MAJOR, 1) ||
(tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 83fd30b0577c..a6c4a1b895bd 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -744,11 +744,15 @@ static void acm_tty_flush_chars(struct tty_struct *tty)
int err;
unsigned long flags;
+ if (!cur) /* nothing to do */
+ return;
+
acm->putbuffer = NULL;
err = usb_autopm_get_interface_async(acm->control);
spin_lock_irqsave(&acm->write_lock, flags);
if (err < 0) {
cur->use = 0;
+ acm->putbuffer = cur;
goto out;
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 5eb1a87228b4..31ccdccd7a04 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -75,8 +75,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
* be the first thing immediately following the endpoint descriptor.
*/
desc = (struct usb_ss_ep_comp_descriptor *) buffer;
- buffer += desc->bLength;
- size -= desc->bLength;
if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
size < USB_DT_SS_EP_COMP_SIZE) {
@@ -100,7 +98,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
ep->desc.wMaxPacketSize;
return;
}
-
+ buffer += desc->bLength;
+ size -= desc->bLength;
memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE);
/* Check the various values */
@@ -146,12 +145,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
ep->ss_ep_comp.bmAttributes = 2;
}
- /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
- if (usb_endpoint_xfer_isoc(&ep->desc) &&
- USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
- usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
- ep, buffer, size);
-
if (usb_endpoint_xfer_isoc(&ep->desc))
max_tx = (desc->bMaxBurst + 1) *
(USB_SS_MULT(desc->bmAttributes)) *
@@ -171,6 +164,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
max_tx);
ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
}
+ /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
+ if (usb_endpoint_xfer_isoc(&ep->desc) &&
+ USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
+ usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
+ ep, buffer, size);
}
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
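Editor's note: the config.c change above stops advancing buffer/size until the companion descriptor has been validated, so the early-return fallback still points at the data it started from. The general rule is validate-before-consume when walking a descriptor stream; a minimal sketch (descriptor type value and helper names are invented for illustration):

    #include <stdio.h>
    #include <stddef.h>

    struct desc_hdr {
        unsigned char bLength;
        unsigned char bDescriptorType;
    };

    #define DT_EXPECTED 0x30

    /* Returns bytes consumed, or 0 if the buffer does not start with the
     * expected descriptor; the caller's cursor only advances on success. */
    static size_t parse_companion(const unsigned char *buf, size_t size)
    {
        const struct desc_hdr *d = (const struct desc_hdr *)buf;

        if (size < sizeof(*d) || d->bDescriptorType != DT_EXPECTED ||
            d->bLength > size)
            return 0;   /* validate before consuming anything */

        /* ... interpret the descriptor body here ... */
        return d->bLength;
    }

    int main(void)
    {
        unsigned char blob[] = { 0x06, DT_EXPECTED, 0, 0, 0, 0 };

        printf("consumed %zu bytes\n", parse_companion(blob, sizeof(blob)));
        return 0;
    }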
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index f9d42cf23e55..7859d738df41 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -73,6 +73,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
if (companion->bus != pdev->bus ||
PCI_SLOT(companion->devfn) != slot)
continue;
+
+ /*
+	 * Companion device should be either UHCI, OHCI or EHCI host
+ * controller, otherwise skip.
+ */
+ if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
+ companion->class != CL_EHCI)
+ continue;
+
companion_hcd = pci_get_drvdata(companion);
if (!companion_hcd || !companion_hcd->self.root_hub)
continue;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e9940dd004e4..818f158232bb 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2254,6 +2254,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
{
u32 intmsk;
u32 val;
+ u32 usbcfg;
/* Kill any ep0 requests as controller will be reinitialized */
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2267,10 +2268,16 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
* set configuration.
*/
+ /* keep other bits untouched (so e.g. forced modes are not lost) */
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
+ GUSBCFG_HNPCAP);
+
/* set the PLL on, remove the HNP/SRP and set the PHY */
val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
- dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
- (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG);
+ usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+ (val << GUSBCFG_USBTRDTIM_SHIFT);
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
dwc2_hsotg_init_fifo(hsotg);
@@ -3031,6 +3038,7 @@ static struct usb_ep_ops dwc2_hsotg_ep_ops = {
static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
{
u32 trdtim;
+ u32 usbcfg;
/* unmask subset of endpoint interrupts */
dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
@@ -3054,11 +3062,16 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
dwc2_hsotg_init_fifo(hsotg);
+ /* keep other bits untouched (so e.g. forced modes are not lost) */
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
+ GUSBCFG_HNPCAP);
+
/* set the PLL on, remove the HNP/SRP and set the PHY */
trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
- dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
- (trdtim << GUSBCFG_USBTRDTIM_SHIFT),
- hsotg->regs + GUSBCFG);
+ usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+ (trdtim << GUSBCFG_USBTRDTIM_SHIFT);
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
if (using_dma(hsotg))
__orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
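Editor's note: the dwc2 hunks replace a blind write of GUSBCFG with a read-modify-write so bits owned by other code paths (e.g. forced-mode settings) survive. The pattern, shown here against a plain variable standing in for the register (mask and value are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_MASK   0x0000fff0u   /* bits this driver owns (illustrative) */
    #define FIELD_VALUE  0x00005a30u

    static uint32_t fake_reg = 0xc0000001u;   /* other bits set elsewhere */

    static uint32_t readl_sim(void)    { return fake_reg; }
    static void writel_sim(uint32_t v) { fake_reg = v; }

    int main(void)
    {
        uint32_t reg;

        /* read-modify-write: touch only the owned field, keep the rest */
        reg  = readl_sim();
        reg &= ~FIELD_MASK;
        reg |= FIELD_VALUE & FIELD_MASK;
        writel_sim(reg);

        printf("reg = %#x\n", fake_reg);   /* 0xc0005a31 */
        return 0;
    }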
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 17fd81447c9f..fa20f5a99d12 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -67,23 +67,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
static int dwc3_core_soft_reset(struct dwc3 *dwc)
{
u32 reg;
+ int retries = 1000;
int ret;
- /* Before Resetting PHY, put Core in Reset */
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
- reg |= DWC3_GCTL_CORESOFTRESET;
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-
- /* Assert USB3 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
- reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
-
- /* Assert USB2 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-
usb_phy_init(dwc->usb2_phy);
usb_phy_init(dwc->usb3_phy);
ret = phy_init(dwc->usb2_generic_phy);
@@ -95,26 +81,28 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
phy_exit(dwc->usb2_generic_phy);
return ret;
}
- mdelay(100);
- /* Clear USB3 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
- reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+ /*
+ * We're resetting only the device side because, if we're in host mode,
+ * XHCI driver will reset the host block. If dwc3 was configured for
+ * host-only mode, then we can return early.
+ */
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
- /* Clear USB2 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_CSFTRST;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
- mdelay(100);
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (!(reg & DWC3_DCTL_CSFTRST))
+ return 0;
- /* After PHYs are stable we can take Core out of reset state */
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
- reg &= ~DWC3_GCTL_CORESOFTRESET;
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ udelay(1);
+ } while (--retries);
- return 0;
+ return -ETIMEDOUT;
}
/**
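Editor's note: the dwc3 soft-reset rework above sets a self-clearing reset bit and then polls it with a bounded retry count instead of sleeping for a fixed 100 ms. A self-contained sketch of that bounded-poll shape (the fake register "completes" after a few reads; names and values are illustrative):

    #include <stdio.h>

    #define CSFTRST 0x40000000u

    static unsigned int dctl = CSFTRST;   /* pretend the core clears it later */
    static int poll_count;

    static unsigned int read_dctl(void)
    {
        if (++poll_count == 5)            /* hardware finishes after a while */
            dctl &= ~CSFTRST;
        return dctl;
    }

    /* returns 0 on completion, -1 on timeout */
    static int soft_reset(void)
    {
        int retries = 1000;

        do {
            if (!(read_dctl() & CSFTRST))
                return 0;
            /* a real driver would udelay(1) between polls here */
        } while (--retries);

        return -1;
    }

    int main(void)
    {
        printf("reset %s after %d polls\n",
               soft_reset() ? "timed out" : "completed", poll_count);
        return 0;
    }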
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 2be268d2423d..72664700b8a2 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -39,8 +39,6 @@
#define USBSS_IRQ_COREIRQ_EN BIT(0)
#define USBSS_IRQ_COREIRQ_CLR BIT(0)
-static u64 kdwc3_dma_mask;
-
struct dwc3_keystone {
struct device *dev;
struct clk *clk;
@@ -108,9 +106,6 @@ static int kdwc3_probe(struct platform_device *pdev)
if (IS_ERR(kdwc->usbss))
return PTR_ERR(kdwc->usbss);
- kdwc3_dma_mask = dma_get_mask(dev);
- dev->dma_mask = &kdwc3_dma_mask;
-
kdwc->clk = devm_clk_get(kdwc->dev, "usb");
error = clk_prepare_enable(kdwc->clk);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 009d83048c8c..adc1e8a624cb 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -35,6 +35,7 @@
#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
+#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
@@ -213,6 +214,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3ac170f9d94d..d54a028cdfeb 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -568,7 +568,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
if (!usb_endpoint_xfer_isoc(desc))
- return 0;
+ goto out;
/* Link TRB for ISOC. The HWO bit is never reset */
trb_st_hw = &dep->trb_pool[0];
@@ -582,9 +582,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
}
+out:
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
- strlcat(dep->name, "-control", sizeof(dep->name));
+ /* don't change name */
break;
case USB_ENDPOINT_XFER_ISOC:
strlcat(dep->name, "-isoc", sizeof(dep->name));
@@ -2487,7 +2488,11 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
* implemented.
*/
- dwc->gadget_driver->resume(&dwc->gadget);
+ if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->resume(&dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
}
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index a5c62093c26c..de9ffd60fcfa 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -656,7 +656,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
ssp_cap->bmAttributes = cpu_to_le32(1);
/* Min RX/TX Lane Count = 1 */
- ssp_cap->wFunctionalitySupport = (1 << 8) | (1 << 12);
+ ssp_cap->wFunctionalitySupport =
+ cpu_to_le16((1 << 8) | (1 << 12));
/*
* bmSublinkSpeedAttr[0]:
@@ -666,7 +667,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
* LSM = 10 (10 Gbps)
*/
ssp_cap->bmSublinkSpeedAttr[0] =
- (3 << 4) | (1 << 14) | (0xa << 16);
+ cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
/*
* bmSublinkSpeedAttr[1] =
* ST = Symmetric, TX
@@ -675,7 +676,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
* LSM = 10 (10 Gbps)
*/
ssp_cap->bmSublinkSpeedAttr[1] =
- (3 << 4) | (1 << 14) | (0xa << 16) | (1 << 7);
+ cpu_to_le32((3 << 4) | (1 << 14) |
+ (0xa << 16) | (1 << 7));
}
return le16_to_cpu(bos->wTotalLength);
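Editor's note: the composite.c fix wraps the multi-byte descriptor fields in cpu_to_le16()/cpu_to_le32() so the wire format is little-endian regardless of host byte order. The same guarantee can be expressed by packing bytes explicitly; a small sketch (put_le32 is a hypothetical helper, not a kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* store a 32-bit value in little-endian byte order, independent of
     * host endianness -- the effect cpu_to_le32() provides for wire fields */
    static void put_le32(uint8_t *p, uint32_t v)
    {
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = (v >> 16) & 0xff;
        p[3] = (v >> 24) & 0xff;
    }

    int main(void)
    {
        uint8_t buf[4];
        uint32_t attr = (3u << 4) | (1u << 14) | (0xau << 16);   /* 0x000a4030 */

        put_le32(buf, attr);
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
    }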
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8cfce105c7ee..e21ca2bd6839 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1147,8 +1147,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
ffs->sb = sb;
data->ffs_data = NULL;
sb->s_fs_info = ffs;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = FUNCTIONFS_MAGIC;
sb->s_op = &ffs_sb_operations;
sb->s_time_gran = 1;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 84c0ee5ebd1e..58fc199a18ec 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kfifo.h>
+#include <linux/spinlock.h>
#include <sound/core.h>
#include <sound/initval.h>
@@ -89,6 +90,7 @@ struct f_midi {
unsigned int buflen, qlen;
/* This fifo is used as a buffer ring for pre-allocated IN usb_requests */
DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *);
+ spinlock_t transmit_lock;
unsigned int in_last_port;
struct gmidi_in_port in_ports_array[/* in_ports */];
@@ -358,7 +360,9 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* allocate a bunch of read buffers and queue them all at once. */
for (i = 0; i < midi->qlen && err == 0; i++) {
struct usb_request *req =
- midi_alloc_ep_req(midi->out_ep, midi->buflen);
+ midi_alloc_ep_req(midi->out_ep,
+ max_t(unsigned, midi->buflen,
+ bulk_out_desc.wMaxPacketSize));
if (req == NULL)
return -ENOMEM;
@@ -597,17 +601,24 @@ static void f_midi_transmit(struct f_midi *midi)
{
struct usb_ep *ep = midi->in_ep;
int ret;
+ unsigned long flags;
/* We only care about USB requests if IN endpoint is enabled */
if (!ep || !ep->enabled)
goto drop_out;
+ spin_lock_irqsave(&midi->transmit_lock, flags);
+
do {
ret = f_midi_do_transmit(midi, ep);
- if (ret < 0)
+ if (ret < 0) {
+ spin_unlock_irqrestore(&midi->transmit_lock, flags);
goto drop_out;
+ }
} while (ret);
+ spin_unlock_irqrestore(&midi->transmit_lock, flags);
+
return;
drop_out:
@@ -1201,6 +1212,8 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
if (status)
goto setup_fail;
+ spin_lock_init(&midi->transmit_lock);
+
++opts->refcnt;
mutex_unlock(&opts->lock);
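Editor's note: the f_midi change serializes f_midi_transmit() with spin_lock_irqsave() so concurrent callers cannot interleave in the transmit loop, and the lock is dropped on every exit path. A rough userspace analogue of that shape, with a pthread mutex standing in for the spinlock (purely illustrative, not the gadget code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t transmit_lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending = 3;

    static int do_transmit(void)
    {
        if (!pending)
            return 0;                       /* nothing left to send */
        printf("sent one, %d left\n", --pending);
        return 1;
    }

    static void transmit(void)
    {
        int ret;

        pthread_mutex_lock(&transmit_lock);
        do {
            ret = do_transmit();
            if (ret < 0) {
                pthread_mutex_unlock(&transmit_lock);  /* unlock on every exit path */
                return;
            }
        } while (ret);
        pthread_mutex_unlock(&transmit_lock);
    }

    int main(void)
    {
        transmit();
        return 0;
    }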
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 5cdaf0150a4e..e64479f882a5 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1954,8 +1954,8 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
return -ENODEV;
/* superblock */
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = GADGETFS_MAGIC;
sb->s_op = &gadget_fs_operations;
sb->s_time_gran = 1;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 81d42cce885a..18569de06b04 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1045,20 +1045,6 @@ static void reset_all_endpoints(struct usba_udc *udc)
list_del_init(&req->queue);
request_complete(ep, req, -ECONNRESET);
}
-
- /* NOTE: normally, the next call to the gadget driver is in
- * charge of disabling endpoints... usually disconnect().
- * The exception would be entering a high speed test mode.
- *
- * FIXME remove this code ... and retest thoroughly.
- */
- list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
- if (ep->ep.desc) {
- spin_unlock(&udc->lock);
- usba_ep_disable(&ep->ep);
- spin_lock(&udc->lock);
- }
- }
}
static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 4151597e9d28..e4e70e11d0f6 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -371,12 +371,6 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
INIT_WORK(&gadget->work, usb_gadget_state_work);
gadget->dev.parent = parent;
-#ifdef CONFIG_HAS_DMA
- dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
- gadget->dev.dma_parms = parent->dma_parms;
- gadget->dev.dma_mask = parent->dma_mask;
-#endif
-
if (release)
gadget->dev.release = release;
else
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 80c1de239e9a..bad0d1f9a41d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1861,6 +1861,12 @@ no_bw:
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
+ xhci->usb2_ports = NULL;
+ xhci->usb3_ports = NULL;
+ xhci->port_array = NULL;
+ xhci->rh_bw = NULL;
+ xhci->ext_caps = NULL;
+
xhci->page_size = 0;
xhci->page_shift = 0;
xhci->bus_state[0].bus_suspended = 0;
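Editor's note: the xhci-mem.c hunk clears the freed pointers so a later re-initialization (or an accidental second cleanup) sees NULL rather than dangling addresses. The free-and-NULL idiom in miniature (struct and field names are illustrative):

    #include <stdlib.h>

    struct ctx {
        int *ports;
        int *caps;
    };

    static void ctx_free(struct ctx *c)
    {
        free(c->ports);
        free(c->caps);

        /* clear the pointers so later code sees NULL, not stale addresses */
        c->ports = NULL;
        c->caps = NULL;
    }

    int main(void)
    {
        struct ctx c = { malloc(16), malloc(16) };

        ctx_free(&c);
        ctx_free(&c);   /* harmless now: free(NULL) is a no-op */
        return 0;
    }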
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f0640b7a1c42..48672fac7ff3 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
static const char hcd_name[] = "xhci_hcd";
@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 5c15e9bc5f7a..474b5fa14900 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
/*
* As of now platform drivers don't provide MSI support so we ensure
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to setup MSI
*/
xhci->quirks |= XHCI_PLAT;
+
+ /*
+ * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+ * to 1. However, these SoCs don't support 64-bit address memory
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup().
+ */
+ if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
+ xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
}
/* called during probe() after chip reset completes */
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 5a2e2e3936c4..529c3c40f901 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -14,7 +14,7 @@
#include "xhci.h" /* for hcd_to_xhci() */
enum xhci_plat_type {
- XHCI_PLAT_TYPE_MARVELL_ARMADA,
+ XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
};
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7cf66212ceae..99b4ff42f7a0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -4004,7 +4004,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
int ret;
- if (xhci->xhc_state) {
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
return -ESHUTDOWN;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d51ee0c3cf9f..9e71c96ad74a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+		/* clear state flags, including dying, halted or removing */
+ xhci->xhc_state = 0;
return ret;
}
@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Resume root hubs only when have pending events. */
status = readl(&xhci->op_regs->status);
if (status & STS_EINT) {
- usb_hcd_resume_root_hub(hcd);
usb_hcd_resume_root_hub(xhci->shared_hcd);
+ usb_hcd_resume_root_hub(hcd);
}
}
@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- usb_hcd_poll_rh_status(hcd);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
return retval;
}
@@ -2773,7 +2774,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_DYING)
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_REMOVING))
return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -3820,7 +3822,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_lock(&xhci->mutex);
- if (xhci->xhc_state) /* dying or halted */
+ if (xhci->xhc_state) /* dying, removing or halted */
goto out;
if (!udev->slot_id) {
@@ -4948,6 +4950,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
return retval;
xhci_dbg(xhci, "Reset complete\n");
+ /*
+ * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
+	 * of HCCPARAMS1 is set to 1. However, the xHCs don't actually support
+	 * 64-bit address memory pointers. So, this driver clears the AC64
+ * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
+ * DMA_BIT_MASK(32)) in this xhci_gen_setup().
+ */
+ if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
+ xhci->hcc_params &= ~BIT(0);
+
/* Set dma_mask and coherent_dma_mask to 64-bits,
* if xHC supports 64-bit addressing */
if (HCC_64BIT_ADDR(xhci->hcc_params) &&
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e293e0974f48..6c629c97f8ad 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1605,6 +1605,7 @@ struct xhci_hcd {
*/
#define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1)
+#define XHCI_STATE_REMOVING (1 << 2)
/* Statistics */
int error_bitmask;
unsigned int quirks;
@@ -1641,6 +1642,7 @@ struct xhci_hcd {
#define XHCI_PME_STUCK_QUIRK (1 << 20)
#define XHCI_MTK_HOST (1 << 21)
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
+#define XHCI_NO_64BIT_SUPPORT (1 << 23)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
index 579587d97217..3d7af85aecb9 100644
--- a/drivers/usb/phy/phy-qcom-8x16-usb.c
+++ b/drivers/usb/phy/phy-qcom-8x16-usb.c
@@ -65,9 +65,7 @@ struct phy_8x16 {
void __iomem *regs;
struct clk *core_clk;
struct clk *iface_clk;
- struct regulator *v3p3;
- struct regulator *v1p8;
- struct regulator *vdd;
+ struct regulator_bulk_data regulator[3];
struct reset_control *phy_reset;
@@ -78,51 +76,6 @@ struct phy_8x16 {
struct notifier_block reboot_notify;
};
-static int phy_8x16_regulators_enable(struct phy_8x16 *qphy)
-{
- int ret;
-
- ret = regulator_set_voltage(qphy->vdd, HSPHY_VDD_MIN, HSPHY_VDD_MAX);
- if (ret)
- return ret;
-
- ret = regulator_enable(qphy->vdd);
- if (ret)
- return ret;
-
- ret = regulator_set_voltage(qphy->v3p3, HSPHY_3P3_MIN, HSPHY_3P3_MAX);
- if (ret)
- goto off_vdd;
-
- ret = regulator_enable(qphy->v3p3);
- if (ret)
- goto off_vdd;
-
- ret = regulator_set_voltage(qphy->v1p8, HSPHY_1P8_MIN, HSPHY_1P8_MAX);
- if (ret)
- goto off_3p3;
-
- ret = regulator_enable(qphy->v1p8);
- if (ret)
- goto off_3p3;
-
- return 0;
-
-off_3p3:
- regulator_disable(qphy->v3p3);
-off_vdd:
- regulator_disable(qphy->vdd);
-
- return ret;
-}
-
-static void phy_8x16_regulators_disable(struct phy_8x16 *qphy)
-{
- regulator_disable(qphy->v1p8);
- regulator_disable(qphy->v3p3);
- regulator_disable(qphy->vdd);
-}
-
static int phy_8x16_notify_connect(struct usb_phy *phy,
enum usb_device_speed speed)
{
@@ -261,7 +214,6 @@ static void phy_8x16_shutdown(struct usb_phy *phy)
static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
{
- struct regulator_bulk_data regs[3];
struct device *dev = qphy->phy.dev;
int ret;
@@ -273,18 +225,15 @@ static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
if (IS_ERR(qphy->iface_clk))
return PTR_ERR(qphy->iface_clk);
- regs[0].supply = "v3p3";
- regs[1].supply = "v1p8";
- regs[2].supply = "vddcx";
+ qphy->regulator[0].supply = "v3p3";
+ qphy->regulator[1].supply = "v1p8";
+ qphy->regulator[2].supply = "vddcx";
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(qphy->regulator),
+ qphy->regulator);
if (ret)
return ret;
- qphy->v3p3 = regs[0].consumer;
- qphy->v1p8 = regs[1].consumer;
- qphy->vdd = regs[2].consumer;
-
qphy->phy_reset = devm_reset_control_get(dev, "phy");
if (IS_ERR(qphy->phy_reset))
return PTR_ERR(qphy->phy_reset);
@@ -364,8 +313,9 @@ static int phy_8x16_probe(struct platform_device *pdev)
if (ret < 0)
goto off_core;
- ret = phy_8x16_regulators_enable(qphy);
- if (0 && ret)
+ ret = regulator_bulk_enable(ARRAY_SIZE(qphy->regulator),
+ qphy->regulator);
+ if (WARN_ON(ret))
goto off_clks;
qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify;
@@ -387,7 +337,7 @@ off_extcon:
extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB,
&qphy->vbus_notify);
off_power:
- phy_8x16_regulators_disable(qphy);
+ regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
off_clks:
clk_disable_unprepare(qphy->iface_clk);
off_core:
@@ -413,7 +363,7 @@ static int phy_8x16_remove(struct platform_device *pdev)
clk_disable_unprepare(qphy->iface_clk);
clk_disable_unprepare(qphy->core_clk);
- phy_8x16_regulators_disable(qphy);
+ regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
return 0;
}
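Editor's note: the phy-qcom-8x16 conversion replaces three individually managed regulators with one regulator_bulk_data array that is fetched, enabled and disabled as a group, which is what removes the hand-rolled enable/rollback helpers. A minimal sketch of the same pattern, using only the bulk calls visible in the diff (struct my_phy and the my_phy_* names are hypothetical):

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/regulator/consumer.h>

    struct my_phy {
        struct regulator_bulk_data supplies[3];
    };

    static int my_phy_get_supplies(struct device *dev, struct my_phy *phy)
    {
        phy->supplies[0].supply = "v3p3";
        phy->supplies[1].supply = "v1p8";
        phy->supplies[2].supply = "vddcx";

        /* one call fetches all named supplies, devm handles the put */
        return devm_regulator_bulk_get(dev, ARRAY_SIZE(phy->supplies),
                                       phy->supplies);
    }

    static int my_phy_power_on(struct my_phy *phy)
    {
        /* enables all supplies, rolling back on partial failure */
        return regulator_bulk_enable(ARRAY_SIZE(phy->supplies), phy->supplies);
    }

    static void my_phy_power_off(struct my_phy *phy)
    {
        regulator_bulk_disable(ARRAY_SIZE(phy->supplies), phy->supplies);
    }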
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index b4de70ee16d3..000f9750149f 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
goto __usbhs_pkt_handler_end;
}
- ret = func(pkt, &is_done);
+ if (likely(func))
+ ret = func(pkt, &is_done);
if (is_done)
__usbhsf_pkt_del(pkt);
@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
pkt->trans = len;
+ usbhsf_tx_irq_ctrl(pipe, 0);
INIT_WORK(&pkt->work, xfer_work);
schedule_work(&pkt->work);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 664b263e4b20..53d104b56ef1 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
+ unsigned long flags;
ureq->req.actual = pkt->actual;
- usbhsg_queue_pop(uep, ureq, 0);
+ usbhs_lock(priv, flags);
+ if (uep)
+ __usbhsg_queue_pop(uep, ureq, 0);
+ usbhs_unlock(priv, flags);
}
static void usbhsg_queue_push(struct usbhsg_uep *uep,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fbfe761c7fba..dd47823bb014 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index b283eb8b86d6..bbeeb2bd55a8 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
struct usb_serial *serial = port->serial;
struct cypress_private *priv;
+ if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
+ dev_err(&port->dev, "required endpoint is missing\n");
+ return -ENODEV;
+ }
+
priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
cypress_set_termios(tty, port, &priv->tmp_termios);
/* setup the port and start reading from the device */
- if (!port->interrupt_in_urb) {
- dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
- __func__);
- return -1;
- }
-
usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
port->interrupt_in_urb->transfer_buffer,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 010a42a92688..16e8e37b3b36 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
static int digi_startup(struct usb_serial *serial)
{
+ struct device *dev = &serial->interface->dev;
struct digi_serial *serial_priv;
int ret;
+ int i;
+
+ /* check whether the device has the expected number of endpoints */
+ if (serial->num_port_pointers < serial->type->num_ports + 1) {
+ dev_err(dev, "OOB endpoints missing\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < serial->type->num_ports + 1 ; i++) {
+ if (!serial->port[i]->read_urb) {
+ dev_err(dev, "bulk-in endpoint missing\n");
+ return -ENODEV;
+ }
+ if (!serial->port[i]->write_urb) {
+ dev_err(dev, "bulk-out endpoint missing\n");
+ return -ENODEV;
+ }
+ }
serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
if (!serial_priv)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 427ae43ee898..3a814e802dee 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ /* ICP DAS I-756xU devices */
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a84df2513994..c5d6c1e73e8e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -872,6 +872,14 @@
#define NOVITUS_BONO_E_PID 0x6010
/*
+ * ICPDAS I-756*U devices
+ */
+#define ICPDAS_VID 0x1b5c
+#define ICPDAS_I7560U_PID 0x0103
+#define ICPDAS_I7561U_PID 0x0104
+#define ICPDAS_I7563U_PID 0x0105
+
+/*
* RT Systems programming cables for various ham radios
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 4446b8d70ac2..885655315de1 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
static int mct_u232_port_probe(struct usb_serial_port *port)
{
+ struct usb_serial *serial = port->serial;
struct mct_u232_private *priv;
+ /* check first to simplify error handling */
+ if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
+ dev_err(&port->dev, "expected endpoint missing\n");
+ return -ENODEV;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* Use second interrupt-in endpoint for reading. */
- priv->read_urb = port->serial->port[1]->interrupt_in_urb;
+ priv->read_urb = serial->port[1]->interrupt_in_urb;
priv->read_urb->context = port;
spin_lock_init(&priv->lock);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 348e19834b83..c6f497f16526 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index dba51362d2e2..90901861bfc0 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -123,7 +123,7 @@ static int slave_configure(struct scsi_device *sdev)
unsigned int max_sectors = 64;
if (us->fflags & US_FL_MAX_SECTORS_MIN)
- max_sectors = PAGE_CACHE_SIZE >> 9;
+ max_sectors = PAGE_SIZE >> 9;
if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_hw_sectors(sdev->request_queue,
max_sectors);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 13e4cc31bc79..16bc679dc2fc 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,7 +2,7 @@
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
@@ -781,6 +781,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
return SUCCESS;
}
+static int uas_target_alloc(struct scsi_target *starget)
+{
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)
+ dev_to_shost(starget->dev.parent)->hostdata;
+
+ if (devinfo->flags & US_FL_NO_REPORT_LUNS)
+ starget->no_report_luns = 1;
+
+ return 0;
+}
+
static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
@@ -824,7 +835,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
if (devinfo->flags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
- scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
@@ -832,6 +842,7 @@ static struct scsi_host_template uas_host_template = {
.module = THIS_MODULE,
.name = "uas",
.queuecommand = uas_queuecommand,
+ .target_alloc = uas_target_alloc,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
@@ -956,6 +967,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result)
goto set_alt0;
+ /*
+ * 1 tag is reserved for untagged commands +
+ * 1 tag to avoid off by one errors in some bridge firmwares
+ */
+ shost->can_queue = devinfo->qdepth - 2;
+
usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev);
if (result)
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index ccc113e83d88..53341a77d89f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: David Webb <djw@noc.ac.uk> */
+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+ "Seagate",
+ "Expansion Desk",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_LUNS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
"Seagate",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 43576ed31ccd..9de988a0f856 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
- US_FL_MAX_SECTORS_240);
+ US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
p = quirks;
while (*p) {
@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'i':
f |= US_FL_IGNORE_DEVICE;
break;
+ case 'j':
+ f |= US_FL_NO_REPORT_LUNS;
+ break;
case 'l':
f |= US_FL_NOT_LOCKABLE;
break;
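Editor's note: the usb-storage hunk adds 'j' to the per-device quirks string parser, where each option character sets one flag bit in the mask. A tiny standalone version of that switch-over-characters pattern (flag names and values are illustrative, not the US_FL_* constants):

    #include <stdio.h>

    #define FL_IGNORE_DEVICE   (1u << 0)
    #define FL_NO_REPORT_LUNS  (1u << 1)
    #define FL_NOT_LOCKABLE    (1u << 2)

    static unsigned int parse_quirks(const char *p)
    {
        unsigned int f = 0;

        /* each option character maps to one flag bit; unknown ones are ignored */
        for (; *p; p++) {
            switch (*p) {
            case 'i':
                f |= FL_IGNORE_DEVICE;
                break;
            case 'j':
                f |= FL_NO_REPORT_LUNS;
                break;
            case 'l':
                f |= FL_NOT_LOCKABLE;
                break;
            }
        }
        return f;
    }

    int main(void)
    {
        printf("flags = %#x\n", parse_quirks("jl"));   /* 0x6 */
        return 0;
    }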
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index facaaf003f19..e40da7759a0e 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
if (!(size > 0))
return 0;
+ if (size > urb->transfer_buffer_length) {
+ /* should not happen, probably malicious packet */
+ if (ud->side == USBIP_STUB) {
+ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+ return 0;
+ } else {
+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+ return -EPIPE;
+ }
+ }
+
ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
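Editor's note: the usbip change rejects a peer-supplied payload length that exceeds the URB's transfer buffer before receiving into it, treating the oversized value as a malformed or malicious packet. The core check in isolation (buffer size and helper name are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define URB_BUF_LEN 64

    /* copy at most the advertised payload, but never more than the buffer
     * actually owned by the URB -- an oversized length is rejected */
    static int recv_xbuff(unsigned char *urb_buf, size_t buf_len,
                          const unsigned char *pkt, size_t pkt_len)
    {
        if (pkt_len > buf_len)
            return -1;   /* malformed or malicious packet */

        memcpy(urb_buf, pkt, pkt_len);
        return (int)pkt_len;
    }

    int main(void)
    {
        unsigned char urb_buf[URB_BUF_LEN];
        unsigned char wire[128] = { 0 };

        printf("ok:  %d\n", recv_xbuff(urb_buf, sizeof(urb_buf), wire, 32));
        printf("bad: %d\n", recv_xbuff(urb_buf, sizeof(urb_buf), wire, 128));
        return 0;
    }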
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 71a923e53f93..3b1ca4411073 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -735,7 +735,7 @@ out:
out_unmap:
for (i = 0; i < nr_pages; i++)
- page_cache_release(pages[i]);
+ put_page(pages[i]);
kfree(pages);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index f6f28cc7eb45..e76bd91a29da 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -17,6 +17,7 @@
*
*/
+#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"
@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
vp_iowrite8(0, &vp_dev->common->device_status);
- /* Flush out the status write, and flush in device writes,
- * including MSI-X interrupts, if any. */
- vp_ioread8(&vp_dev->common->device_status);
+ /* After writing 0 to device_status, the driver MUST wait for a read of
+ * device_status to return 0 before reinitializing the device.
+ * This will flush out the status write, and flush in device writes,
+ * including MSI-X interrupts, if any.
+ */
+ while (vp_ioread8(&vp_dev->common->device_status))
+ msleep(1);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 488017a0806a..cb7138c97c69 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;
- irq_move_irq(data);
+ if (!VALID_EVTCHN(evtchn))
+ return;
- if (VALID_EVTCHN(evtchn))
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+ int masked = test_and_set_mask(evtchn);
+
+ clear_evtchn(evtchn);
+
+ irq_move_masked_irq(data);
+
+ if (!masked)
+ unmask_evtchn(evtchn);
+ } else
clear_evtchn(evtchn);
if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
- irq_move_irq(data);
+ if (!VALID_EVTCHN(evtchn))
+ return;
- if (VALID_EVTCHN(evtchn))
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+ int masked = test_and_set_mask(evtchn);
+
+ clear_evtchn(evtchn);
+
+ irq_move_masked_irq(data);
+
+ if (!masked)
+ unmask_evtchn(evtchn);
+ } else
clear_evtchn(evtchn);
}