Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/bus.c | 2
-rw-r--r-- drivers/acpi/ec.c | 29
-rw-r--r-- drivers/acpi/internal.h | 2
-rw-r--r-- drivers/clk/Kconfig | 1
-rw-r--r-- drivers/clk/microchip/clk-pic32mzda.c | 10
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 12
-rw-r--r-- drivers/edac/edac_mc.c | 3
-rw-r--r-- drivers/edac/sb_edac.c | 35
-rw-r--r-- drivers/firmware/efi/arm-init.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/cgs_common.h | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 8
-rw-r--r-- drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 9
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 21
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | 54
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 7
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 3
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 7
-rw-r--r-- drivers/gpu/drm/msm/msm_rd.c | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fbcon.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fbcon.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_fbcon.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c | 53
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c | 15
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 37
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dsi.c | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 32
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 14
-rw-r--r-- drivers/gpu/drm/vc4/vc4_kms.c | 16
-rw-r--r-- drivers/gpu/drm/vc4/vc4_regs.h | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 4
-rw-r--r-- drivers/hwmon/fam15h_power.c | 8
-rw-r--r-- drivers/hwmon/lm90.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 99
-rw-r--r-- drivers/i2c/busses/i2c-octeon.c | 17
-rw-r--r-- drivers/i2c/muxes/i2c-mux-reg.c | 1
-rw-r--r-- drivers/infiniband/core/cache.c | 10
-rw-r--r-- drivers/infiniband/core/cm.c | 4
-rw-r--r-- drivers/infiniband/core/device.c | 6
-rw-r--r-- drivers/infiniband/core/iwpm_msg.c | 2
-rw-r--r-- drivers/infiniband/core/mad.c | 6
-rw-r--r-- drivers/infiniband/core/sysfs.c | 24
-rw-r--r-- drivers/infiniband/hw/hfi1/affinity.c | 31
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.c | 6
-rw-r--r-- drivers/infiniband/hw/hfi1/init.c | 2
-rw-r--r-- drivers/infiniband/hw/hfi1/trace.c | 13
-rw-r--r-- drivers/infiniband/hw/hfi1/user_sdma.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx5/cq.c | 12
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 22
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 41
-rw-r--r-- drivers/infiniband/hw/usnic/usnic_uiom.c | 5
-rw-r--r-- drivers/infiniband/sw/rdmavt/qp.c | 6
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib.h | 1
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 4
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 8
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 15
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 6
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 6
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 7
-rw-r--r-- drivers/media/usb/uvc/uvc_v4l2.c | 97
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 151
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 46
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r-- drivers/net/ethernet/ethoc.c | 7
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 3
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 69
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 203
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 45
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_port.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 2
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 16
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 2
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 1
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.c | 6
-rw-r--r-- drivers/of/fdt.c | 15
-rw-r--r-- drivers/of/irq.c | 19
-rw-r--r-- drivers/of/of_reserved_mem.c | 11
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 1
-rw-r--r-- drivers/scsi/sd.c | 8
-rw-r--r-- drivers/scsi/sd.h | 5
136 files changed, 1261 insertions(+), 725 deletions(-)
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 31e8da648fff..262ca31b86d9 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void)
* Maybe EC region is required at bus_scan/acpi_get_devices. So it
* is necessary to enable it as early as possible.
*/
- acpi_boot_ec_enable();
+ acpi_ec_dsdt_probe();
printk(KERN_INFO PREFIX "Interpreter enabled\n");
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0e70181f150c..73c76d646064 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
return AE_OK;
}
-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+ {"PNP0C09", 0},
+ {"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
{
- if (!boot_ec)
+ acpi_status status;
+
+ if (boot_ec)
return 0;
+
+ /*
+ * Finding EC from DSDT if there is no ECDT EC available. When this
+ * function is invoked, ACPI tables have been fully loaded, we can
+ * walk namespace now.
+ */
+ boot_ec = make_acpi_ec();
+ if (!boot_ec)
+ return -ENOMEM;
+ status = acpi_get_devices(ec_device_ids[0].id,
+ ec_parse_device, boot_ec, NULL);
+ if (ACPI_FAILURE(status) || !boot_ec->handle)
+ return -ENODEV;
if (!ec_install_handlers(boot_ec)) {
first_ec = boot_ec;
return 0;
@@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void)
return -EFAULT;
}
-static const struct acpi_device_id ec_device_ids[] = {
- {"PNP0C09", 0},
- {"", 0},
-};
-
#if 0
/*
* Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9bb0773d39bf..27cc7feabfe4 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
void acpi_ec_unblock_transactions_early(void);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 53ddba26578c..98efbfcdb503 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE
config COMMON_CLK_NXP
def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
select REGMAP_MMIO if ARCH_LPC32XX
+ select MFD_SYSCON if ARCH_LPC18XX
---help---
Support for clock providers on NXP platforms.
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index 020a29acc5b0..51f54380474b 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev)
/* register fixed rate clocks */
clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
- CLK_IS_ROOT, 24000000);
+ 0, 24000000);
clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
- CLK_IS_ROOT, 8000000);
+ 0, 8000000);
clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
- CLK_IS_ROOT, 8000000);
+ 0, 8000000);
clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
- CLK_IS_ROOT, 32000);
+ 0, 32000);
clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
- CLK_IS_ROOT, 24000000);
+ 0, 24000000);
/* fixed rate (optional) clock */
if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
pr_info("pic32-clk: dt requests SOSC.\n");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0d159b513469..ee367e9b7d2e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1460,6 +1460,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_clear_update_util_hook(policy->cpu);
+ pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+ policy->cpuinfo.max_freq, policy->max);
+
cpu = all_cpu_data[0];
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
@@ -1495,13 +1498,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_sysfs_pct);
limits->max_perf_pct = max(limits->min_policy_pct,
limits->max_perf_pct);
- limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
/* Make sure min_perf_pct <= max_perf_pct */
limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
limits->min_perf = div_fp(limits->min_perf_pct, 100);
limits->max_perf = div_fp(limits->max_perf_pct, 100);
+ limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
intel_pstate_set_update_util_hook(policy->cpu);
@@ -1558,8 +1561,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
- policy->cpuinfo.max_freq =
- cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ update_turbo_state();
+ policy->cpuinfo.max_freq = limits->turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+ policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
intel_pstate_init_acpi_perf_limits(policy);
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
cpumask_set_cpu(policy->cpu, policy->cpus);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 6aa256b0a1ed..c3ee3ad98a63 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
- edac_mod_work(&mci->work, value);
+ if (mci->op_state == OP_RUNNING_POLL)
+ edac_mod_work(&mci->work, value);
}
mutex_unlock(&mem_ctls_mutex);
}
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index b4d0bf6534cf..6744d88bdea8 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};
-#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
+ GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+ GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
/* Device 16, functions 2-7 */
@@ -326,6 +329,7 @@ struct pci_id_descr {
struct pci_id_table {
const struct pci_id_descr *descr;
int n_devs;
+ enum type type;
};
struct sbridge_dev {
@@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
};
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) { \
+ .descr = A, \
+ .n_devs = ARRAY_SIZE(A), \
+ .type = T \
+}
+
static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
{0,} /* 0 terminated list. */
};
@@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
};
static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
{0,} /* 0 terminated list. */
};
@@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
};
static const struct pci_id_table pci_dev_descr_haswell_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
{0,} /* 0 terminated list. */
};
@@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
};
static const struct pci_id_table pci_dev_descr_knl_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
{0,}
};
@@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
};
static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
{0,} /* 0 terminated list. */
};
@@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_tad[i],
rir_offset[j][k],
&reg);
- tmp_mb = RIR_OFFSET(reg) << 6;
+ tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
i, j, k,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
- (u32)RIR_RNK_TGT(reg),
+ (u32)RIR_RNK_TGT(pvt->info.type, reg),
reg);
}
}
@@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
rir_offset[n_rir][idx],
&reg);
- *rank = RIR_RNK_TGT(reg);
+ *rank = RIR_RNK_TGT(pvt->info.type, reg);
edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
n_rir,
@@ -3357,12 +3366,12 @@ fail0:
#define ICPU(model, table) \
{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
-/* Order here must match "enum type" */
static const struct x86_cpu_id sbridge_cpuids[] = {
ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */
ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */
ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */
ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */
+ ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */
ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */
{ }
};
@@ -3398,7 +3407,7 @@ static int sbridge_probe(const struct x86_cpu_id *id)
mc, mc + 1, num_mc);
sbridge_dev->mc = mc++;
- rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
+ rc = sbridge_register_mci(sbridge_dev, ptable->type);
if (unlikely(rc < 0))
goto fail1;
}
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index a850cbc48d8d..c49d50e68aee 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -174,6 +174,7 @@ static __init void reserve_regions(void)
{
efi_memory_desc_t *md;
u64 paddr, npages, size;
+ int resv;
if (efi_enabled(EFI_DBG))
pr_info("Processing EFI memory map:\n");
@@ -190,12 +191,14 @@ static __init void reserve_regions(void)
paddr = md->phys_addr;
npages = md->num_pages;
+ resv = is_reserve_region(md);
if (efi_enabled(EFI_DBG)) {
char buf[64];
- pr_info(" 0x%012llx-0x%012llx %s",
+ pr_info(" 0x%012llx-0x%012llx %s%s\n",
paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
- efi_md_typeattr_format(buf, sizeof(buf), md));
+ efi_md_typeattr_format(buf, sizeof(buf), md),
+ resv ? "*" : "");
}
memrange_efi_to_native(&paddr, &npages);
@@ -204,14 +207,9 @@ static __init void reserve_regions(void)
if (is_normal_ram(md))
early_init_dt_add_memory_arch(paddr, size);
- if (is_reserve_region(md)) {
+ if (resv)
memblock_mark_nomap(paddr, size);
- if (efi_enabled(EFI_DBG))
- pr_cont("*");
- }
- if (efi_enabled(EFI_DBG))
- pr_cont("\n");
}
set_bit(EFI_MEMMAP, &efi.flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 992f00b65be4..01c36b8d6222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -799,6 +799,7 @@ struct amdgpu_ring {
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
+ int vmid;
};
/*
@@ -936,7 +937,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
+ uint32_t oa_base, uint32_t oa_size,
+ bool vmid_switch);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 199f76baf22c..8943099eb135 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
return result;
}
+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+ CGS_FUNC_ADEV;
+ if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+ release_firmware(adev->pm.fw);
+ return 0;
+ }
+ /* cannot release other firmware because they are not created by cgs */
+ return -EINVAL;
+}
+
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
@@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_pm_query_clock_limits,
amdgpu_cgs_set_camera_voltages,
amdgpu_cgs_get_firmware_info,
+ amdgpu_cgs_rel_firmware,
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state,
amdgpu_cgs_get_active_displays_info,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bb8b149786d7..964f31404f17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
*/
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
- if (adev->mode_info.atom_context)
+ if (adev->mode_info.atom_context) {
kfree(adev->mode_info.atom_context->scratch);
+ kfree(adev->mode_info.atom_context->iio);
+ }
kfree(adev->mode_info.atom_context);
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
@@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_block_status[i].valid = false;
}
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (adev->ip_blocks[i].funcs->late_fini)
+ adev->ip_blocks[i].funcs->late_fini((void *)adev);
+ }
+
return 0;
}
@@ -1513,8 +1520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_atombios_has_gpu_virtualization_table(adev);
/* Post card if necessary */
- if (!amdgpu_card_posted(adev) ||
- adev->virtualization.supports_sr_iov) {
+ if (!amdgpu_card_posted(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 34e35423b78e..7a0b1e50f293 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -122,6 +122,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
+ int vmid = 0, old_vmid = ring->vmid;
struct fence *hwf;
uint64_t ctx;
@@ -135,9 +136,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (job) {
vm = job->vm;
ctx = job->ctx;
+ vmid = job->vm_id;
} else {
vm = NULL;
ctx = 0;
+ vmid = 0;
}
if (!ring->ready) {
@@ -163,7 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
job->gds_base, job->gds_size,
job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
+ job->oa_base, job->oa_size,
+ (ring->current_ctx == ctx) && (old_vmid != vmid));
if (r) {
amdgpu_ring_undo(ring);
return r;
@@ -180,7 +184,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
need_ctx_switch = ring->current_ctx != ctx;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
-
/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
continue;
@@ -188,6 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
need_ctx_switch);
need_ctx_switch = false;
+ ring->vmid = vmid;
}
if (ring->funcs->emit_hdp_invalidate)
@@ -198,6 +202,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vm_id)
amdgpu_vm_reset_id(adev, job->vm_id);
+ ring->vmid = old_vmid;
amdgpu_ring_undo(ring);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 6bd961fb43dc..82256558e0f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle)
if (ret)
return ret;
-#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
- amdgpu_pm_sysfs_fini(adev);
- amd_powerplay_fini(adev->powerplay.pp_handle);
- }
-#endif
-
return ret;
}
@@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle)
return ret;
}
+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pp_enabled) {
+ amdgpu_pm_sysfs_fini(adev);
+ amd_powerplay_fini(adev->powerplay.pp_handle);
+ }
+
+ if (adev->powerplay.ip_funcs->late_fini)
+ adev->powerplay.ip_funcs->late_fini(
+ adev->powerplay.pp_handle);
+#endif
+}
+
static int amdgpu_pp_suspend(void *handle)
{
int ret = 0;
@@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.sw_fini = amdgpu_pp_sw_fini,
.hw_init = amdgpu_pp_hw_init,
.hw_fini = amdgpu_pp_hw_fini,
+ .late_fini = amdgpu_pp_late_fini,
.suspend = amdgpu_pp_suspend,
.resume = amdgpu_pp_resume,
.is_idle = amdgpu_pp_is_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3b02272db678..870f9494252c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
ring->ring = NULL;
ring->ring_obj = NULL;
+ amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8bf84efafb04..48618ee324eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
return r;
}
r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+ memset(sa_manager->cpu_ptr, 0, sa_manager->size);
amdgpu_bo_unreserve(sa_manager->bo);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 01abfc21b4a2..e19520c4b4b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int r;
- if (adev->uvd.vcpu_bo == NULL)
- return 0;
+ kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
- r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
- if (!r) {
- amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
- amdgpu_bo_unpin(adev->uvd.vcpu_bo);
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
- }
+ if (adev->uvd.vcpu_bo) {
+ r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+ if (!r) {
+ amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+ amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+ amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+ }
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+ amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+ }
amdgpu_ring_fini(&adev->uvd.ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9f36ed30ba11..62a4c127620f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -298,7 +298,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size)
+ uint32_t oa_base, uint32_t oa_size,
+ bool vmid_switch)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -312,8 +313,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
int r;
if (ring->funcs->emit_pipeline_sync && (
- pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
- ring->type == AMDGPU_RING_TYPE_COMPUTE))
+ pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
amdgpu_ring_emit_pipeline_sync(ring);
if (ring->funcs->emit_vm_flush &&
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index ea407db1fbcf..5ec1f1e9c983 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle)
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 518dca43b133..9dc4e24e31e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
+static void cik_sdma_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
@@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+
+ cik_sdma_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
if (r)
return r;
- /* unhalt the MEs */
- cik_sdma_enable(adev, true);
+ /* halt the engine before programing */
+ cik_sdma_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev);
@@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ cik_sdma_free_microcode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
index 245cabf06575..ed03b75175d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle)
static int fiji_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 7f18a53ab53a..8c6ad1e72f02 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -991,6 +991,22 @@ out:
return err;
}
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+}
+
/**
* gfx_v7_0_tiling_mode_table_init - init the hw tiling table
*
@@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle)
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ gfx_v7_0_free_microcode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index f19bab68fd83..9f6f8669edc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -836,6 +836,26 @@ err1:
return r;
}
+
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ if ((adev->asic_type != CHIP_STONEY) &&
+ (adev->asic_type != CHIP_TOPAZ))
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+
+ kfree(adev->gfx.rlc.register_list_format);
+}
+
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -1983,7 +2003,7 @@ static int gfx_v8_0_sw_fini(void *handle)
gfx_v8_0_rlc_fini(adev);
- kfree(adev->gfx.rlc.register_list_format);
+ gfx_v8_0_free_microcode(adev);
return 0;
}
@@ -3974,11 +3994,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x3a00161a);
amdgpu_ring_write(ring, 0x0000002e);
break;
- case CHIP_TOPAZ:
case CHIP_CARRIZO:
amdgpu_ring_write(ring, 0x00000002);
amdgpu_ring_write(ring, 0x00000000);
break;
+ case CHIP_TOPAZ:
+ amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
+ 0x00000000 : 0x00000002);
+ amdgpu_ring_write(ring, 0x00000000);
+ break;
case CHIP_STONEY:
amdgpu_ring_write(ring, 0x00000000);
amdgpu_ring_write(ring, 0x00000000);
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
index 460bc8ad37e6..825ccd63f2dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
@@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle)
static int iceland_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index f4c3130d3fdb..b556bd0a8797 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
}
}
+static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/**
* sdma_v2_4_init_microcode - load ucode images from disk
*
@@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+ sdma_v2_4_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
return -EINVAL;
}
- /* unhalt the MEs */
- sdma_v2_4_enable(adev, true);
+ /* halt the engine before programing */
+ sdma_v2_4_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v2_4_gfx_resume(adev);
@@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ sdma_v2_4_free_microcode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 31d99b0010f7..532ea88da66a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
}
}
+static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/**
* sdma_v3_0_init_microcode - load ucode images from disk
*
@@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+
+ /* unhalt the MEs */
+ sdma_v3_0_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v3_0_ctx_switch_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
}
}
- /* unhalt the MEs */
- sdma_v3_0_enable(adev, true);
- /* enable sdma ring preemption */
- sdma_v3_0_ctx_switch_enable(adev, true);
+ /* disble sdma engine before programing it */
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v3_0_gfx_resume(adev);
@@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ sdma_v3_0_free_microcode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
index b7615cefcac4..f06f6f4dc3a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
@@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle)
static int tonga_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 6080951d539d..afce1edbe250 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -157,6 +157,7 @@ struct amd_ip_funcs {
int (*hw_init)(void *handle);
/* tears down the hw state */
int (*hw_fini)(void *handle);
+ void (*late_fini)(void *handle);
/* handles IP specific hw/sw changes for suspend */
int (*suspend)(void *handle);
/* handles IP specific hw/sw changes for resume */
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index a461e155a160..7464daf89ca1 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info);
+typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
+ enum cgs_ucode_id type);
+
typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
@@ -645,6 +648,7 @@ struct cgs_ops {
cgs_set_camera_voltages_t set_camera_voltages;
/* Firmware Info */
cgs_get_firmware_info get_firmware_info;
+ cgs_rel_firmware rel_firmware;
/* cg pg interface*/
cgs_set_powergating_state set_powergating_state;
cgs_set_clockgating_state set_clockgating_state;
@@ -738,6 +742,8 @@ struct cgs_device
CGS_CALL(set_camera_voltages,dev,mask,voltages)
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_rel_firmware(dev, type) \
+ CGS_CALL(rel_firmware, dev, type)
#define cgs_set_powergating_state(dev, block_type, state) \
CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 8e345bfddb69..e629f8a9fe93 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -73,11 +73,14 @@ static int pp_sw_init(void *handle)
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
- goto err;
+ goto err1;
pr_info("amdgpu: powerplay initialized\n");
return 0;
+err1:
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
err:
pr_err("amdgpu: powerplay initialization failed\n");
return ret;
@@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle)
if (hwmgr->hwmgr_func->backend_fini != NULL)
ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 46410e3c7349..fb88e4e5d625 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
pem_unregister_interrupts(eventmgr);
pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-
- if (eventmgr != NULL)
- kfree(eventmgr);
}
int eventmgr_init(struct pp_instance *handle)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 24a16e49b571..586f73276226 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -1830,7 +1830,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i].value);
+ return vddci_table->entries[i-1].value);
}
static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 1c48917da3cf..20f20e075588 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
if (hwmgr == NULL || hwmgr->ps == NULL)
return -EINVAL;
+ /* do hwmgr finish*/
+ kfree(hwmgr->backend);
+
+ kfree(hwmgr->start_thermal_controller.function_list);
+
+ kfree(hwmgr->set_temperature_range.function_list);
+
kfree(hwmgr->ps);
kfree(hwmgr);
return 0;
@@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i].value);
+ return vddci_table->entries[i-1].value);
}
int phm_find_boot_level(void *table,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
index 0b99ab3ba0c5..ae96f14b827c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
@@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
(uint8_t *)&data->power_tune_table,
- sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
+ (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to download PmFuseTable Failed!",
return -EINVAL);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 16fed487973b..d27e8c40602a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
}
- /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
- for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
- data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
- /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
- /* param1 is for corresponding std voltage */
- data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
-
- if (NULL != allowed_vdd_mclk_table) {
- /* Initialize Vddci DPM table based on allow Mclk values */
- for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
- data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
- data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
- data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
- data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
- data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
- }
-
/* setup PCIE gen speed levels*/
tonga_setup_default_pcie_tables(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 10e3630ee39d..296ec7ef6d45 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -1040,48 +1040,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- if (NULL != hwmgr->soft_pp_table) {
- kfree(hwmgr->soft_pp_table);
+ if (NULL != hwmgr->soft_pp_table)
hwmgr->soft_pp_table = NULL;
- }
- if (NULL != pp_table_information->vdd_dep_on_sclk)
- pp_table_information->vdd_dep_on_sclk = NULL;
+ kfree(pp_table_information->vdd_dep_on_sclk);
+ pp_table_information->vdd_dep_on_sclk = NULL;
- if (NULL != pp_table_information->vdd_dep_on_mclk)
- pp_table_information->vdd_dep_on_mclk = NULL;
+ kfree(pp_table_information->vdd_dep_on_mclk);
+ pp_table_information->vdd_dep_on_mclk = NULL;
- if (NULL != pp_table_information->valid_mclk_values)
- pp_table_information->valid_mclk_values = NULL;
+ kfree(pp_table_information->valid_mclk_values);
+ pp_table_information->valid_mclk_values = NULL;
- if (NULL != pp_table_information->valid_sclk_values)
- pp_table_information->valid_sclk_values = NULL;
+ kfree(pp_table_information->valid_sclk_values);
+ pp_table_information->valid_sclk_values = NULL;
- if (NULL != pp_table_information->vddc_lookup_table)
- pp_table_information->vddc_lookup_table = NULL;
+ kfree(pp_table_information->vddc_lookup_table);
+ pp_table_information->vddc_lookup_table = NULL;
- if (NULL != pp_table_information->vddgfx_lookup_table)
- pp_table_information->vddgfx_lookup_table = NULL;
+ kfree(pp_table_information->vddgfx_lookup_table);
+ pp_table_information->vddgfx_lookup_table = NULL;
- if (NULL != pp_table_information->mm_dep_table)
- pp_table_information->mm_dep_table = NULL;
+ kfree(pp_table_information->mm_dep_table);
+ pp_table_information->mm_dep_table = NULL;
- if (NULL != pp_table_information->cac_dtp_table)
- pp_table_information->cac_dtp_table = NULL;
+ kfree(pp_table_information->cac_dtp_table);
+ pp_table_information->cac_dtp_table = NULL;
- if (NULL != hwmgr->dyn_state.cac_dtp_table)
- hwmgr->dyn_state.cac_dtp_table = NULL;
+ kfree(hwmgr->dyn_state.cac_dtp_table);
+ hwmgr->dyn_state.cac_dtp_table = NULL;
- if (NULL != pp_table_information->ppm_parameter_table)
- pp_table_information->ppm_parameter_table = NULL;
+ kfree(pp_table_information->ppm_parameter_table);
+ pp_table_information->ppm_parameter_table = NULL;
- if (NULL != pp_table_information->pcie_table)
- pp_table_information->pcie_table = NULL;
+ kfree(pp_table_information->pcie_table);
+ pp_table_information->pcie_table = NULL;
- if (NULL != hwmgr->pptable) {
- kfree(hwmgr->pptable);
- hwmgr->pptable = NULL;
- }
+ kfree(hwmgr->pptable);
+ hwmgr->pptable = NULL;
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 673a75c74e18..8e52a2e82db5 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
static int fiji_smu_fini(struct pp_smumgr *smumgr)
{
+ struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+
+ smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
if (smumgr->backend) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index de618ead9db8..043b6ac09d5f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -469,6 +469,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr)
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c483baf6b4fb..0728c1e3d97a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
int smum_fini(struct pp_smumgr *smumgr)
{
+ kfree(smumgr->device);
kfree(smumgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 32820b680d88..b22722eabafc 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
static int tonga_smu_fini(struct pp_smumgr *smumgr)
{
+ struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
+
+ smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
+ smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
if (smumgr->backend != NULL) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0ec1ad961e0d..dc723f7ead7d 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
.volatile_reg = fsl_dcu_drm_is_volatile_reg,
+ .max_register = 0x11fc,
};
static int fsl_dcu_drm_irq_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index fbe304ee6c80..2aec27dbb5bb 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
}
adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
- if (!adreno_gpu->memptrs) {
+ if (IS_ERR(adreno_gpu->memptrs)) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index d9759bf3482e..c6cf837c5193 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr;
fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ if (IS_ERR(fbi->screen_base)) {
+ ret = PTR_ERR(fbi->screen_base);
+ goto fail_unlock;
+ }
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = fbdev->bo->size;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 7daf4054dd2b..69836f5685b1 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
return ERR_CAST(pages);
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (msm_obj->vaddr == NULL)
+ return ERR_PTR(-ENOMEM);
}
return msm_obj->vaddr;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b89ca5174863..eb4bb8b2f3a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->dev = dev;
submit->gpu = gpu;
+ submit->fence = NULL;
submit->pid = get_pid(task_pid(current));
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
submit->nr_cmds = 0;
+ INIT_LIST_HEAD(&submit->node);
INIT_LIST_HEAD(&submit->bo_list);
ww_acquire_init(&submit->ticket, &reservation_ww_class);
@@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+ /* make sure we don't have garbage flags, in case we hit
+ * error path before flags is initialized:
+ */
+ submit->bos[i].flags = 0;
+
ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
if (ret) {
ret = -EFAULT;
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index b48f73ac6389..0857710c2ff2 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf = msm_gem_vaddr_locked(&obj->base);
+ if (IS_ERR(buf))
+ continue;
+
buf += iova - submit->bos[idx].iova;
rd_write_section(rd, RD_GPUADDR,
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 1f14b908b221..42f5359cf988 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->start = msm_gem_vaddr_locked(ring->bo);
+ if (IS_ERR(ring->start)) {
+ ret = PTR_ERR(ring->start);
+ goto fail;
+ }
ring->end = ring->start + (size / 4);
ring->cur = ring->start;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index c612dc1f1eb4..126a85cc81bc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -16,9 +16,9 @@ enum nvkm_devidx {
NVKM_SUBDEV_MC,
NVKM_SUBDEV_BUS,
NVKM_SUBDEV_TIMER,
+ NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_FB,
NVKM_SUBDEV_LTC,
- NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_MMU,
NVKM_SUBDEV_BAR,
NVKM_SUBDEV_PMU,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
index db10c11f0595..c5a6ebd5a478 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
@@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
struct nvbios_ocfg {
- u16 match;
+ u8 proto;
+ u8 flags;
u16 clkcmp[2];
};
@@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 57aaf98a26f9..300ea03be8f0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -552,6 +552,7 @@ nouveau_fbcon_init(struct drm_device *dev)
if (ret)
goto fini;
+ fbcon->helper.fbdev->pixmap.buf_align = 4;
return 0;
fini:
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 0f3e4bb411cc..7d9248b8c664 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t fg;
uint32_t bg;
uint32_t dsize;
- uint32_t width;
uint32_t *data = (uint32_t *)image->data;
int ret;
@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 8);
- dsize = ALIGN(width * image->height, 32) >> 5;
-
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | width);
+ OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ dsize = ALIGN(image->width * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 33d9ee0fac40..1aeb698e9707 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, 0);
OUT_RING(chan, image->dy);
+ dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index a0913359ac05..839f4c8c1805 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, 0);
OUT_RING (chan, image->dy);
+ dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
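The three fbcon hunks above stop pre-padding the blit width and instead round the raw pixel count up to a 32-bit boundary only when sizing the data push. A small sketch of that sizing math follows, using a local ALIGN() equivalent; the image dimensions are invented.

```c
#include <stdio.h>

/* Round x up to a multiple of a (a must be a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 12, height = 22;	/* illustrative glyph size */
	unsigned int bits = width * height;	/* 1bpp expansion bitmap   */

	/* Pad to a 32-bit boundary, then convert bits -> dwords (>> 5). */
	unsigned int dwords = ALIGN(bits, 32) >> 5;

	printf("%u x %u image -> %u dwords pushed\n", width, height, dwords);
	return 0;
}
```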
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index a74c5dd27dc0..e2a64ed14b22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
nvkm-y += nvkm/engine/disp/sornv50.o
nvkm-y += nvkm/engine/disp/sorg94.o
nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/dport.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index f0314664349c..5dd34382f55a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
mask |= 0x0001 << or;
mask |= 0x0100 << head;
+
list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
@@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
if (!outp)
return NULL;
+ *conf = (ctrl & 0x00000f00) >> 8;
switch (outp->info.type) {
case DCB_OUTPUT_TMDS:
- *conf = (ctrl & 0x00000f00) >> 8;
if (*conf == 5)
*conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- *conf = disp->sor.lvdsconf;
- break;
- case DCB_OUTPUT_DP:
- *conf = (ctrl & 0x00000f00) >> 8;
+ *conf |= disp->sor.lvdsconf;
break;
- case DCB_OUTPUT_ANALOG:
default:
- *conf = 0x00ff;
break;
}
- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+ data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+ &ver, &hdr, &cnt, &len, &info2);
if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index b6944142d616..f4b9cf8574be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -36,7 +36,7 @@ gm107_disp = {
.outp.internal.crt = nv50_dac_output_new,
.outp.internal.tmds = nv50_sor_output_new,
.outp.internal.lvds = nv50_sor_output_new,
- .outp.internal.dp = gf119_sor_dp_new,
+ .outp.internal.dp = gm107_sor_dp_new,
.dac.nr = 3,
.dac.power = nv50_dac_power,
.dac.sense = nv50_dac_sense,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 4226d2153b9c..fcb1b0c46d64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
if (!outp)
return NULL;
+ *conf = (ctrl & 0x00000f00) >> 8;
if (outp->info.location == 0) {
switch (outp->info.type) {
case DCB_OUTPUT_TMDS:
- *conf = (ctrl & 0x00000f00) >> 8;
if (*conf == 5)
*conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- *conf = disp->sor.lvdsconf;
+ *conf |= disp->sor.lvdsconf;
break;
- case DCB_OUTPUT_DP:
- *conf = (ctrl & 0x00000f00) >> 8;
- break;
- case DCB_OUTPUT_ANALOG:
default:
- *conf = 0x00ff;
break;
}
} else {
@@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
pclk = pclk / 2;
}
- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+ data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+ &ver, &hdr, &cnt, &len, &info2);
if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
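In both exec_clkcmp() hunks the output configuration now travels as one 16-bit value: the low byte carries the protocol taken from the control word, the high byte carries extra flags (the LVDS configuration bits OR'd on top), and the two halves are split apart again for nvbios_ocfg_match(). A sketch of that packing, with invented register values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ctrl = 0x00000500;	/* illustrative control word   */
	uint16_t lvdsconf = 0x0300;	/* illustrative LVDS flag bits */

	/* Low byte: protocol from the control word. */
	uint16_t conf = (ctrl & 0x00000f00) >> 8;

	/* High byte: extra flags OR'd on top (the LVDS case above). */
	conf |= lvdsconf;

	uint8_t proto = conf & 0xff;	/* passed as 'proto' to ocfg_match */
	uint8_t flags = conf >> 8;	/* passed as 'flags' to ocfg_match */

	printf("conf=0x%04x -> proto=0x%02x flags=0x%02x\n", conf, proto, flags);
	return 0;
}
```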
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index e9067ba4e179..4e983f6d7032 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
-int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
- struct nvkm_output **);
+int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
+
+int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index b4b41b135643..22706c0a54b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -40,8 +40,7 @@ static int
gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 loff = gf119_sor_loff(outp);
- nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+ nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
@@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
return 0;
}
-static int
+int
gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
int ln, int vs, int pe, int pc)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
new file mode 100644
index 000000000000..37790b2617c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+int
+gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ const u32 soff = outp->base.or * 0x800;
+ const u32 data = 0x01010101 * pattern;
+ if (outp->base.info.sorconf.link & 1)
+ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+ else
+ nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+ return 0;
+}
+
+static const struct nvkm_output_dp_func
+gm107_sor_dp_func = {
+ .pattern = gm107_sor_dp_pattern,
+ .lnk_pwr = g94_sor_dp_lnk_pwr,
+ .lnk_ctl = gf119_sor_dp_lnk_ctl,
+ .drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gm107_sor_dp_new(struct nvkm_disp *disp, int index,
+ struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+ return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 2cfbef9c344f..c44fa7ea672a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
}
static int
-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
- struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 soff = gm200_sor_soff(outp);
- const u32 data = 0x01010101 * pattern;
- if (outp->base.info.sorconf.link & 1)
- nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
- else
- nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
- return 0;
-}
-
-static int
gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
@@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
static const struct nvkm_output_dp_func
gm200_sor_dp_func = {
- .pattern = gm200_sor_dp_pattern,
+ .pattern = gm107_sor_dp_pattern,
.lnk_pwr = gm200_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gm200_sor_dp_drv_ctl,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 9513badb8220..ae9ab5b1ab97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
}
static const struct nvkm_enum gf100_mp_warp_error[] = {
- { 0x00, "NO_ERROR" },
- { 0x01, "STACK_MISMATCH" },
+ { 0x01, "STACK_ERROR" },
+ { 0x02, "API_STACK_ERROR" },
+ { 0x03, "RET_EMPTY_STACK_ERROR" },
+ { 0x04, "PC_WRAP" },
{ 0x05, "MISALIGNED_PC" },
- { 0x08, "MISALIGNED_GPR" },
- { 0x09, "INVALID_OPCODE" },
- { 0x0d, "GPR_OUT_OF_BOUNDS" },
- { 0x0e, "MEM_OUT_OF_BOUNDS" },
- { 0x0f, "UNALIGNED_MEM_ACCESS" },
+ { 0x06, "PC_OVERFLOW" },
+ { 0x07, "MISALIGNED_IMMC_ADDR" },
+ { 0x08, "MISALIGNED_REG" },
+ { 0x09, "ILLEGAL_INSTR_ENCODING" },
+ { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
+ { 0x0b, "ILLEGAL_INSTR_PARAM" },
+ { 0x0c, "INVALID_CONST_ADDR" },
+ { 0x0d, "OOR_REG" },
+ { 0x0e, "OOR_ADDR" },
+ { 0x0f, "MISALIGNED_ADDR" },
{ 0x10, "INVALID_ADDR_SPACE" },
- { 0x11, "INVALID_PARAM" },
+ { 0x11, "ILLEGAL_INSTR_PARAM2" },
+ { 0x12, "INVALID_CONST_ADDR_LDC" },
+ { 0x13, "GEOMETRY_SM_ERROR" },
+ { 0x14, "DIVERGENT" },
+ { 0x15, "WARP_EXIT" },
{}
};
static const struct nvkm_bitfield gf100_mp_global_error[] = {
+ { 0x00000001, "SM_TO_SM_FAULT" },
+ { 0x00000002, "L1_ERROR" },
{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
- { 0x00000008, "OUT_OF_STACK_SPACE" },
+ { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
+ { 0x00000010, "BPT_INT" },
+ { 0x00000020, "BPT_PAUSE" },
+ { 0x00000040, "SINGLE_STEP_COMPLETE" },
+ { 0x20000000, "ECC_SEC_ERROR" },
+ { 0x40000000, "ECC_DED_ERROR" },
+ { 0x80000000, "TIMEOUT" },
{}
};
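The expanded gf100_mp_warp_error and gf100_mp_global_error tables are lookup tables used to turn raw trap status bits into names when reporting errors. Below is a compact userspace sketch of decoding a status word against such a table; the values and the sample status word are illustrative, not the hardware's.

```c
#include <stdint.h>
#include <stdio.h>

struct bitfield { uint32_t mask; const char *name; };

static const struct bitfield global_error[] = {
	{ 0x00000001, "SM_TO_SM_FAULT" },
	{ 0x00000002, "L1_ERROR" },
	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
	{ 0x80000000, "TIMEOUT" },
	{ 0, NULL },
};

int main(void)
{
	uint32_t gerr = 0x80000005;	/* pretend this was read from the trap register */

	/* Print the name of every bit that is set. */
	for (const struct bitfield *b = global_error; b->mask; b++)
		if (gerr & b->mask)
			printf("%s ", b->name);
	printf("\n");
	return 0;
}
```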
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
index a5e92135cd77..9efb1b48cd54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
@@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
{
u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
if (data) {
- info->match = nvbios_rd16(bios, data + 0x00);
+ info->proto = nvbios_rd08(bios, data + 0x00);
+ info->flags = nvbios_rd16(bios, data + 0x01);
info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
}
@@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
}
u16
-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
+nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
{
u16 data, idx = 0;
while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
- if (info->match == type)
+ if ((info->proto == proto || info->proto == 0xff) &&
+ (info->flags == flags))
break;
}
return data;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index e292f5679418..389fb13a1998 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
}
static void
-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
+gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
{
struct nvkm_subdev *subdev = &ltc->subdev;
struct nvkm_device *device = subdev->device;
- u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
+ u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
u32 stat = nvkm_rd32(device, base + 0x00c);
if (stat) {
@@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc)
while (mask) {
u32 s, c = __ffs(mask);
for (s = 0; s < ltc->lts_nr; s++)
- gm107_ltc_lts_isr(ltc, c, s);
+ gm107_ltc_intr_lts(ltc, c, s);
mask &= ~(1 << c);
}
}
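gm107_ltc_intr() walks the per-cache interrupt mask one set bit at a time, and the renamed gm107_ltc_intr_lts() derives each slice's register window from a fixed base plus per-cache and per-slice strides. A userspace sketch of that dispatch loop follows; the strides mirror the patch, the handler body is a placeholder, and ffs() stands in for the kernel's __ffs().

```c
#include <stdio.h>
#include <strings.h>	/* ffs() */

static void intr_lts(int c, int s)
{
	/* Per-slice register window: base + cache stride + slice stride. */
	unsigned int base = 0x140400 + c * 0x2000 + s * 0x200;
	printf("LTC%d LTS%d -> status reg 0x%06x\n", c, s, base + 0x00c);
}

int main(void)
{
	unsigned int mask = 0x5;	/* pretend caches 0 and 2 raised an interrupt */
	int lts_nr = 2;

	while (mask) {
		int c = ffs(mask) - 1;	/* lowest set bit */
		for (int s = 0; s < lts_nr; s++)
			intr_lts(c, s);
		mask &= ~(1u << c);
	}
	return 0;
}
```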
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
index 2a29bfd5125a..e18e0dc19ec8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
@@ -46,7 +46,7 @@ static const struct nvkm_ltc_func
gm200_ltc = {
.oneinit = gm200_ltc_oneinit,
.init = gm200_ltc_init,
- .intr = gm107_ltc_intr, /*XXX: not validated */
+ .intr = gm107_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc = 16,
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 9ed8272e54ae..56c43f355ce3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct regulator *vdds_dsi;
- int r;
if (dsi->vdds_dsi_reg != NULL)
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index e129245eb8a9..9255c0e1e4a7 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -120,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
static int hdmi_init_regulator(void)
{
- int r;
struct regulator *reg;
if (hdmi.vdda_reg != NULL)
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 904d0754ad78..0f18b76c7906 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -456,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
-
- if (debug_dump_regs) {
- DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
- vc4_hvs_dump_state(dev);
- }
-
if (crtc->state->event) {
unsigned long flags;
@@ -473,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock_irqsave(&dev->event_lock, flags);
vc4_crtc->event = crtc->state->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->state->event = NULL;
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+ }
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+ vc4_hvs_dump_state(dev);
}
}
@@ -500,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
{
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ u32 chan = vc4_crtc->channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (vc4_crtc->event) {
+ if (vc4_crtc->event &&
+ (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -556,6 +565,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+ drm_crtc_vblank_put(crtc);
drm_framebuffer_unreference(flip_state->fb);
kfree(flip_state);
@@ -598,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
return ret;
}
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
/* Immediately update the plane's legacy fb pointer, so that later
* modeset prep sees the state that will be present when the semaphore
* is released.
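The vc4_crtc.c hunks above pair each async flip with a drm_crtc_vblank_get()/put() and, more importantly, only deliver the completion event once the HVS's active display-list pointer has actually latched the new address, so a flip queued just before vblank is not signalled a frame early. A minimal sketch of that readback check, with a fake register in place of the SCALER_DISPLACTX read:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend MMIO: where the scanout hardware says it is currently reading. */
static uint32_t fake_displact_reg = 0x100;

static uint32_t hvs_read_active_dlist(void) { return fake_displact_reg; }

static bool flip_complete(uint32_t pending_dlist_start)
{
	/*
	 * Same idea as vc4_crtc_handle_page_flip(): only report the flip
	 * as done once the hardware's active pointer matches the display
	 * list that was just programmed.
	 */
	return hvs_read_active_dlist() == pending_dlist_start;
}

int main(void)
{
	uint32_t new_dlist = 0x180;

	printf("before latch: complete=%d\n", flip_complete(new_dlist));
	fake_displact_reg = new_dlist;		/* hardware latches at vblank */
	printf("after latch:  complete=%d\n", flip_complete(new_dlist));
	return 0;
}
```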
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 3446ece21b4a..250ed7e3754c 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = {
};
static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
DRM_ROOT_ONLY),
};
@@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = {
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index cb37751bc99f..861a623bc185 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev,
return -ENOMEM;
/* Make sure that any outstanding modesets have finished. */
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- kfree(c);
- return ret;
+ if (nonblock) {
+ ret = down_trylock(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return -EBUSY;
+ }
+ } else {
+ ret = down_interruptible(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return ret;
+ }
}
ret = drm_atomic_helper_prepare_planes(dev, state);
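The vc4_kms.c hunk makes a nonblocking commit refuse to sleep: it trylocks the modeset semaphore and returns -EBUSY instead of waiting. Below is a userspace sketch of that split using a POSIX semaphore; the commit body is a stub and the function names are invented.

```c
#include <errno.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t modeset_sem;

static int commit(bool nonblock)
{
	if (nonblock) {
		/* Nonblocking path: fail fast if a modeset is in flight. */
		if (sem_trywait(&modeset_sem))
			return -EBUSY;
	} else {
		/* Blocking path: wait (interruptibly, in the kernel version). */
		if (sem_wait(&modeset_sem))
			return -errno;
	}

	/* ... program the hardware ... */

	sem_post(&modeset_sem);
	return 0;
}

int main(void)
{
	sem_init(&modeset_sem, 0, 1);
	printf("blocking commit: %d\n", commit(false));
	printf("nonblocking commit: %d\n", commit(true));
	sem_destroy(&modeset_sem);
	return 0;
}
```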
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 6163b95c5411..f99eece4cc97 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -341,6 +341,10 @@
#define SCALER_DISPLACT0 0x00000030
#define SCALER_DISPLACT1 0x00000034
#define SCALER_DISPLACT2 0x00000038
+#define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \
+ (x) * (SCALER_DISPLACT1 - \
+ SCALER_DISPLACT0))
+
#define SCALER_DISPCTRL0 0x00000040
# define SCALER_DISPCTRLX_ENABLE BIT(31)
# define SCALER_DISPCTRLX_RESET BIT(30)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 6de283c8fa3e..f0374f9b56ca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/frame.h>
#include <asm/hypervisor.h>
#include "drmP.h"
#include "vmwgfx_msg.h"
@@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
return -EINVAL;
}
-
+STACK_FRAME_NON_STANDARD(vmw_send_msg);
/**
@@ -304,6 +305,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
return 0;
}
+STACK_FRAME_NON_STANDARD(vmw_recv_msg);
/**
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index eb97a9241d17..15aa49d082c4 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data)
*/
static int read_registers(struct fam15h_power_data *data)
{
- int this_cpu, ret, cpu;
int core, this_core;
cpumask_var_t mask;
+ int ret, cpu;
ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
if (!ret)
@@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data)
memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
get_online_cpus();
- this_cpu = smp_processor_id();
/*
* Choose the first online core of each compute unit, and then
@@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data)
cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
}
- if (cpumask_test_cpu(this_cpu, mask))
- do_read_registers_on_cu(data);
+ on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
- smp_call_function_many(mask, do_read_registers_on_cu, data, true);
put_online_cpus();
-
free_cpumask_var(mask);
return 0;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index c9ff08dbe10c..e30a5939dc0d 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -375,7 +375,7 @@ struct lm90_data {
int kind;
u32 flags;
- int update_interval; /* in milliseconds */
+ unsigned int update_interval; /* in milliseconds */
u8 config_orig; /* Original configuration register value */
u8 convrate_orig; /* Original conversion rate register value */
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 64b1208bca5e..4a60ad214747 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -245,6 +245,13 @@ struct i801_priv {
struct platform_device *mux_pdev;
#endif
struct platform_device *tco_pdev;
+
+ /*
+ * If set to true the host controller registers are reserved for
+ * ACPI AML use. Protected by acpi_lock.
+ */
+ bool acpi_reserved;
+ struct mutex acpi_lock;
};
#define FEATURE_SMBUS_PEC (1 << 0)
@@ -718,6 +725,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
int ret = 0, xact = 0;
struct i801_priv *priv = i2c_get_adapdata(adap);
+ mutex_lock(&priv->acpi_lock);
+ if (priv->acpi_reserved) {
+ mutex_unlock(&priv->acpi_lock);
+ return -EBUSY;
+ }
+
pm_runtime_get_sync(&priv->pci_dev->dev);
hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
@@ -820,6 +833,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
out:
pm_runtime_mark_last_busy(&priv->pci_dev->dev);
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
+ mutex_unlock(&priv->acpi_lock);
return ret;
}
@@ -1257,6 +1271,83 @@ static void i801_add_tco(struct i801_priv *priv)
priv->tco_pdev = pdev;
}
+#ifdef CONFIG_ACPI
+static acpi_status
+i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ u64 *value, void *handler_context, void *region_context)
+{
+ struct i801_priv *priv = handler_context;
+ struct pci_dev *pdev = priv->pci_dev;
+ acpi_status status;
+
+ /*
+ * Once BIOS AML code touches the OpRegion we warn and inhibit any
+ * further access from the driver itself. This device is now owned
+ * by the system firmware.
+ */
+ mutex_lock(&priv->acpi_lock);
+
+ if (!priv->acpi_reserved) {
+ priv->acpi_reserved = true;
+
+ dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
+ dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
+
+ /*
+ * BIOS is accessing the host controller so prevent it from
+ * suspending automatically from now on.
+ */
+ pm_runtime_get_sync(&pdev->dev);
+ }
+
+ if ((function & ACPI_IO_MASK) == ACPI_READ)
+ status = acpi_os_read_port(address, (u32 *)value, bits);
+ else
+ status = acpi_os_write_port(address, (u32)*value, bits);
+
+ mutex_unlock(&priv->acpi_lock);
+
+ return status;
+}
+
+static int i801_acpi_probe(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+ acpi_status status;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (adev) {
+ status = acpi_install_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
+ NULL, priv);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ }
+
+ return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
+}
+
+static void i801_acpi_remove(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (!adev)
+ return;
+
+ acpi_remove_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
+
+ mutex_lock(&priv->acpi_lock);
+ if (priv->acpi_reserved)
+ pm_runtime_put(&priv->pci_dev->dev);
+ mutex_unlock(&priv->acpi_lock);
+}
+#else
+static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
+static inline void i801_acpi_remove(struct i801_priv *priv) { }
+#endif
+
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
@@ -1274,6 +1365,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
priv->adapter.dev.parent = &dev->dev;
ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
priv->adapter.retries = 3;
+ mutex_init(&priv->acpi_lock);
priv->pci_dev = dev;
switch (dev->device) {
@@ -1336,10 +1428,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
- err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
- if (err) {
+ if (i801_acpi_probe(priv))
return -ENODEV;
- }
err = pcim_iomap_regions(dev, 1 << SMBBAR,
dev_driver_string(&dev->dev));
@@ -1348,6 +1438,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"Failed to request SMBus region 0x%lx-0x%Lx\n",
priv->smba,
(unsigned long long)pci_resource_end(dev, SMBBAR));
+ i801_acpi_remove(priv);
return err;
}
@@ -1412,6 +1503,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
err = i2c_add_adapter(&priv->adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
+ i801_acpi_remove(priv);
return err;
}
@@ -1438,6 +1530,7 @@ static void i801_remove(struct pci_dev *dev)
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
+ i801_acpi_remove(priv);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
platform_device_unregister(priv->tco_pdev);
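The i2c-i801 changes above hand the SMBus registers over to firmware the first time ACPI AML touches the OpRegion; from then on i801_access() bails out with -EBUSY, and the acpi_reserved flag is only read or written under acpi_lock. A userspace sketch of that mutex-guarded reservation pattern, with the transfer itself stubbed out:

```c
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct controller {
	pthread_mutex_t lock;
	bool reserved;		/* set once firmware takes ownership */
};

static int do_transfer(struct controller *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->reserved) {
		/* Firmware owns the registers now: refuse to touch them. */
		pthread_mutex_unlock(&c->lock);
		return -EBUSY;
	}

	/* ... perform the SMBus transaction while holding the lock ... */

	pthread_mutex_unlock(&c->lock);
	return 0;
}

int main(void)
{
	struct controller c = { PTHREAD_MUTEX_INITIALIZER, false };

	printf("before reservation: %d\n", do_transfer(&c));
	c.reserved = true;	/* what the ACPI OpRegion handler does */
	printf("after reservation:  %d\n", do_transfer(&c));
	return 0;
}
```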
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index aa5f01efd826..30ae35146723 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -934,8 +934,15 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
return result;
for (i = 0; i < length; i++) {
- /* for the last byte TWSI_CTL_AAK must not be set */
- if (i + 1 == length)
+ /*
+ * For the last byte to receive, TWSI_CTL_AAK must not be set.
+ *
+ * A special case is I2C_M_RECV_LEN where we don't know the
+ * additional length yet. If recv_len is set we assume we're
+ * not reading the final byte and therefore need to set
+ * TWSI_CTL_AAK.
+ */
+ if ((i + 1 == length) && !(recv_len && i == 0))
final_read = true;
/* clear iflg to allow next event */
@@ -950,12 +957,8 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
data[i] = octeon_i2c_data_read(i2c);
if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) {
- dev_err(i2c->dev,
- "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
- __func__, data[i]);
+ if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
return -EPROTO;
- }
length += data[i];
}
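The i2c-octeon hunk handles I2C_M_RECV_LEN reads, where the first received byte tells the master how many more bytes follow, so the ACK control bit must not be dropped after what only looked like the final byte. A sketch of that receive loop over a fake wire; the buffer contents and helper names are invented.

```c
#include <stdbool.h>
#include <stdio.h>

#define SMBUS_BLOCK_MAX 32

/* Fake slave: first byte is the block length (3), then the payload. */
static const unsigned char wire[] = { 3, 0xaa, 0xbb, 0xcc };
static unsigned int wire_pos;

static unsigned char read_byte(void) { return wire[wire_pos++]; }

int main(void)
{
	unsigned char data[SMBUS_BLOCK_MAX + 2];
	bool recv_len = true;		/* I2C_M_RECV_LEN-style transfer */
	unsigned int length = 1;	/* only the length byte is known up front */

	for (unsigned int i = 0; i < length; i++) {
		/*
		 * Final-byte handling: with recv_len the first byte is never
		 * the last one, since it still carries the real length.
		 */
		bool final_read = (i + 1 == length) && !(recv_len && i == 0);

		data[i] = read_byte();
		if (recv_len && i == 0) {
			if (data[i] > SMBUS_BLOCK_MAX + 1)
				return 1;	/* -EPROTO in the driver */
			length += data[i];
		}
		printf("byte %u = 0x%02x (ack=%d)\n", i, data[i], !final_read);
	}
	return 0;
}
```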
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 6773cadf7c9f..26e7c5187a58 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -260,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = {
.remove = i2c_mux_reg_remove,
.driver = {
.name = "i2c-mux-reg",
+ .of_match_table = of_match_ptr(i2c_mux_reg_of_match),
},
};
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index c2e257d97eff..040966775f40 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
{
int ret = 0;
struct net_device *old_net_dev;
+ enum ib_gid_type old_gid_type;
/* in rdma_cap_roce_gid_table, this function should be protected by a
* sleep-able lock.
@@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
}
old_net_dev = table->data_vec[ix].attr.ndev;
+ old_gid_type = table->data_vec[ix].attr.gid_type;
if (old_net_dev && old_net_dev != attr->ndev)
dev_put(old_net_dev);
/* if modify_gid failed, just delete the old gid */
@@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
attr = &zattr;
table->data_vec[ix].context = NULL;
}
- if (default_gid)
- table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+
memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
+ if (default_gid) {
+ table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+ if (action == GID_TABLE_WRITE_ACTION_DEL)
+ table->data_vec[ix].attr.gid_type = old_gid_type;
+ }
if (table->data_vec[ix].attr.ndev &&
table->data_vec[ix].attr.ndev != old_net_dev)
dev_hold(table->data_vec[ix].attr.ndev);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1d92e091e22e..c99525512b34 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
work->cm_event.event = IB_CM_USER_ESTABLISHED;
/* Check if the device started its remove_one */
- spin_lock_irq(&cm.lock);
+ spin_lock_irqsave(&cm.lock, flags);
if (!cm_dev->going_down) {
queue_delayed_work(cm.wq, &work->work, 0);
} else {
kfree(work);
ret = -ENODEV;
}
- spin_unlock_irq(&cm.lock);
+ spin_unlock_irqrestore(&cm.lock, flags);
out:
return ret;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5516fb070344..5c155fa91eec 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device,
if (err || port_attr->subnet_prefix)
return err;
+ if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+ return 0;
+
err = ib_query_gid(device, port_num, 0, &gid, NULL);
if (err)
return err;
@@ -1024,7 +1027,8 @@ static int __init ib_core_init(void)
goto err_mad;
}
- if (ib_add_ibnl_clients()) {
+ ret = ib_add_ibnl_clients();
+ if (ret) {
pr_warn("Couldn't register ibnl clients\n");
goto err_sa;
}
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 43e3fa27102b..1c41b95cefec 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
if (!nlmsg_request) {
pr_info("%s: Could not find a matching request (seq = %u)\n",
__func__, msg_seq);
- return -EINVAL;
+ return -EINVAL;
}
pm_msg = nlmsg_request->req_buffer;
local_sockaddr = (struct sockaddr_storage *)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 82fb511112da..2d49228f28b2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
/* Now, check to see if there are any methods still in use */
if (!check_method_table(method)) {
/* If not, release management method table */
- kfree(method);
- class->method_table[mgmt_class] = NULL;
- /* Any management classes left ? */
+ kfree(method);
+ class->method_table[mgmt_class] = NULL;
+ /* Any management classes left ? */
if (!check_class_table(class)) {
/* If not, release management class table */
kfree(class);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 5e573bb18660..a5793c8f1590 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -889,9 +889,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
u8 port_num)
{
- struct attribute_group *hsag = NULL;
+ struct attribute_group *hsag;
struct rdma_hw_stats *stats;
- int i = 0, ret;
+ int i, ret;
stats = device->alloc_hw_stats(device, port_num);
@@ -899,19 +899,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
return;
if (!stats->names || stats->num_counters <= 0)
- goto err;
+ goto err_free_stats;
+ /*
+ * Two extra attribute elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
hsag = kzalloc(sizeof(*hsag) +
- // 1 extra for the lifespan config entry
- sizeof(void *) * (stats->num_counters + 1),
+ sizeof(void *) * (stats->num_counters + 2),
GFP_KERNEL);
if (!hsag)
- return;
+ goto err_free_stats;
ret = device->get_hw_stats(device, stats, port_num,
stats->num_counters);
if (ret != stats->num_counters)
- goto err;
+ goto err_free_hsag;
stats->timestamp = jiffies;
@@ -922,10 +925,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
if (!hsag->attrs[i])
goto err;
+ sysfs_attr_init(hsag->attrs[i]);
}
/* treat an error here as non-fatal */
hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
+ if (hsag->attrs[i])
+ sysfs_attr_init(hsag->attrs[i]);
if (port) {
struct kobject *kobj = &port->kobj;
@@ -946,10 +952,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
return;
err:
- kfree(stats);
for (; i >= 0; i--)
kfree(hsag->attrs[i]);
+err_free_hsag:
kfree(hsag);
+err_free_stats:
+ kfree(stats);
return;
}
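The setup_hw_stats() hunk reworks the error path into the usual staged-unwind style: each failure jumps to a label that frees exactly what has been allocated so far, newest first. A freestanding sketch of that pattern follows; the allocation names and the query step are placeholders.

```c
#include <stdio.h>
#include <stdlib.h>

static int query_counters(int *stats, int n) { (void)stats; return n; }

static int setup(int n)
{
	int *stats, **attrs;
	int i = -1;

	stats = calloc(n, sizeof(*stats));
	if (!stats)
		return -1;

	/* +2 slots: one lifespan entry, one NULL terminator (as in the patch). */
	attrs = calloc(n + 2, sizeof(*attrs));
	if (!attrs)
		goto err_free_stats;

	if (query_counters(stats, n) != n)
		goto err_free_hsag;

	for (i = 0; i < n; i++) {
		attrs[i] = malloc(sizeof(int));
		if (!attrs[i])
			goto err;
	}

	/* In the driver the group is handed to sysfs here; just free in this sketch. */
	for (i = 0; i < n; i++)
		free(attrs[i]);
	free(attrs);
	free(stats);
	return 0;

err:			/* undo the per-counter allocations, newest first */
	for (; i >= 0; i--)
		free(attrs[i]);
err_free_hsag:
	free(attrs);
err_free_stats:
	free(stats);
	return -1;
}

int main(void)
{
	return setup(4) ? 1 : 0;
}
```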
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 6e7050ab9e16..14d7eeb09be6 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
const struct cpumask *node_mask,
*proc_mask = tsk_cpus_allowed(current);
struct cpu_mask_set *set = &dd->affinity->proc;
- char buf[1024];
/*
* check whether process/context affinity has already
* been set
*/
if (cpumask_weight(proc_mask) == 1) {
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
- hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
- current->pid, current->comm, buf);
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
/*
* Mark the pre-set CPU as used. This is atomic so we don't
* need the lock
@@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_set_cpu(cpu, &set->used);
goto done;
} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
- hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
- current->pid, current->comm, buf);
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
goto done;
}
@@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
&dd->affinity->rcv_intr.mask :
&dd->affinity->rcv_intr.used));
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
- hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
+ hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
+ cpumask_pr_args(intrs));
/*
* If we don't have a NUMA node requested, preference is towards
@@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
if (node == -1)
node = dd->node;
node_mask = cpumask_of_node(node);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
- hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
+ hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
+ cpumask_pr_args(node_mask));
/* diff will hold all unused cpus */
cpumask_andnot(diff, &set->mask, &set->used);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
- hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
+ hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
/* get cpumask of available CPUs on preferred NUMA */
cpumask_and(mask, diff, node_mask);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
- hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
+ hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
/*
* At first, we don't want to place processes on the same
@@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_andnot(diff, &set->mask, &set->used);
cpumask_andnot(mask, diff, node_mask);
}
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
- hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
+ hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
+ cpumask_pr_args(mask));
cpu = cpumask_first(mask);
if (cpu >= nr_cpu_ids) /* empty */
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 3b876da745a1..81619fbb5842 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -7832,8 +7832,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
* save first 2 flits in the packet that caused
* the error
*/
- dd->err_info_rcvport.packet_flit1 = hdr0;
- dd->err_info_rcvport.packet_flit2 = hdr1;
+ dd->err_info_rcvport.packet_flit1 = hdr0;
+ dd->err_info_rcvport.packet_flit2 = hdr1;
}
switch (info) {
case 1:
@@ -11906,7 +11906,7 @@ static void update_synth_timer(unsigned long opaque)
hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
}
-mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+ mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
#define C_MAX_NAME 13 /* 12 chars + one for /0 */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 5cc492e5776d..0d28a5a40fae 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
(void *)dd->rcvhdrtail_dummy_kvaddr,
dd->rcvhdrtail_dummy_physaddr);
- dd->rcvhdrtail_dummy_kvaddr = NULL;
+ dd->rcvhdrtail_dummy_kvaddr = NULL;
}
for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 79b2952c0dfb..4cfb13771897 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -214,19 +214,6 @@ const char *print_u32_array(
return ret;
}
-const char *print_u64_array(
- struct trace_seq *p,
- u64 *arr, int len)
-{
- int i;
- const char *ret = trace_seq_buffer_ptr(p);
-
- for (i = 0; i < len; i++)
- trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
- trace_seq_putc(p, 0);
- return ret;
-}
-
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
__hfi1_trace_fn(SDMA);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 29f4795f866c..47ffd273ecbd 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -183,7 +183,7 @@ struct user_sdma_iovec {
struct sdma_mmu_node *node;
};
-#define SDMA_CACHE_NODE_EVICT BIT(0)
+#define SDMA_CACHE_NODE_EVICT 0
struct sdma_mmu_node {
struct mmu_rb_node rb;
@@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req,
*/
SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
req->tidoffset, req->tidoffset / req->omfactor,
- !!(req->omfactor - KDETH_OM_SMALL));
+ req->omfactor != KDETH_OM_SMALL);
KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
req->tidoffset / req->omfactor);
KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
- !!(req->omfactor - KDETH_OM_SMALL));
+ req->omfactor != KDETH_OM_SMALL);
}
done:
trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
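The SDMA_CACHE_NODE_EVICT change matters because the flag is consumed by set_bit()/test_bit(), which take a bit *number*, not a pre-shifted mask; passing BIT(0) there actually targets bit 1. A small sketch of the distinction, with local stand-ins for the kernel bit helpers:

```c
#include <stdio.h>

#define BIT(n)	(1UL << (n))

/* Local stand-ins: the kernel's set_bit()/test_bit() take a bit number. */
static void set_bit(unsigned int nr, unsigned long *word) { *word |= 1UL << nr; }
static int test_bit(unsigned int nr, const unsigned long *word)
{
	return (*word >> nr) & 1;
}

#define NODE_EVICT	0		/* correct: a bit number  */
#define NODE_EVICT_BAD	BIT(0)		/* wrong: already a mask  */

int main(void)
{
	unsigned long flags = 0;

	set_bit(NODE_EVICT, &flags);
	printf("bit 0 set: %d (flags=0x%lx)\n", test_bit(0, &flags), flags);

	flags = 0;
	set_bit(NODE_EVICT_BAD, &flags);	/* sets bit 1, not bit 0 */
	printf("bit 0 set: %d (flags=0x%lx)\n", test_bit(0, &flags), flags);
	return 0;
}
```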
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b01ef6eee6e8..0eb09e104542 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
- if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
}
+ if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index dabcc65bd65e..9c0e67bd2ba7 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
int eqn;
int err;
- if (entries < 0)
+ if (entries < 0 ||
+ (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
return ERR_PTR(-EINVAL);
if (check_cq_create_flags(attr->flags))
@@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
return -ENOSYS;
}
- if (entries < 1)
+ if (entries < 1 ||
+ entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+ mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+ entries,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
return -EINVAL;
+ }
entries = roundup_pow_of_two(entries + 1);
- if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
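The mlx5 CQ hunks validate the requested entry count against the device's log_max_cq_sz cap both before and after the count is rounded up to a power of two. A sketch of that bound check with an invented cap and a local roundup helper:

```c
#include <stdint.h>
#include <stdio.h>

/* Smallest power of two >= x (x > 1). */
static uint32_t roundup_pow_of_two(uint32_t x)
{
	uint32_t p = 1;
	while (p < x)
		p <<= 1;
	return p;
}

static int check_cq_entries(int entries, unsigned int log_max_cq_sz)
{
	uint32_t max = 1u << log_max_cq_sz;

	if (entries < 1 || (uint32_t)entries > max)
		return -1;			/* -EINVAL in the driver */

	/* One slot is reserved, then the count is rounded up. */
	entries = roundup_pow_of_two(entries + 1);
	if ((uint32_t)entries > max + 1)
		return -1;

	printf("resize to %d entries accepted\n", entries);
	return 0;
}

int main(void)
{
	unsigned int log_max_cq_sz = 16;	/* illustrative device cap */

	check_cq_entries(4096, log_max_cq_sz);
	printf("oversized request: %d\n", check_cq_entries(1 << 20, log_max_cq_sz));
	return 0;
}
```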
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c72797cd9e4f..b48ad85315dc 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_CAP_ETH(dev->mdev, scatter_fcs))
props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
+ if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
@@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
- resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+ if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+ resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
resp.cache_line_size = L1_CACHE_BYTES;
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
@@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
- if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+ /*
+ * We don't want to expose information from the PCI bar that is located
+ * after 4096 bytes, so if the arch only supports larger pages, let's
+ * pretend we don't support reading the HCA's core clock. This is also
+ * forced by the mmap function.
+ */
+ if (PAGE_SIZE <= 4096 &&
+ field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
resp.comp_mask |=
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
resp.hca_core_clock_offset =
@@ -1798,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+ return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
@@ -1866,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
break;
case MLX5_DEV_EVENT_PORT_DOWN:
+ case MLX5_DEV_EVENT_PORT_INITIALIZED:
ibev.event = IB_EVENT_PORT_ERR;
port = (u8)param;
break;
- case MLX5_DEV_EVENT_PORT_INITIALIZED:
- /* not used by ULPs */
- return;
-
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
port = (u8)param;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 504117657d41..ce434228a5ea 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
qp->rq.max_gs = 0;
qp->rq.wqe_cnt = 0;
qp->rq.wqe_shift = 0;
+ cap->max_recv_wr = 0;
+ cap->max_recv_sge = 0;
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
@@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct ib_ah_attr *ah,
struct mlx5_qp_path *path, u8 port, int attr_mask,
- u32 path_flags, const struct ib_qp_attr *attr)
+ u32 path_flags, const struct ib_qp_attr *attr,
+ bool alt)
{
enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
int err;
if (attr_mask & IB_QP_PKEY_INDEX)
- path->pkey_index = attr->pkey_index;
+ path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
+ attr->pkey_index);
if (ah->ah_flags & IB_AH_GRH) {
if (ah->grh.sgid_index >=
@@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
ah->grh.sgid_index);
path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
} else {
- path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
- path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
- 0;
+ path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+ path->fl_free_ar |=
+ (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
path->rlid = cpu_to_be16(ah->dlid);
path->grh_mlid = ah->src_path_bits & 0x7f;
if (ah->ah_flags & IB_AH_GRH)
@@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
path->port = port;
if (attr_mask & IB_QP_TIMEOUT)
- path->ackto_lt = attr->timeout << 3;
+ path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
@@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
if (attr_mask & IB_QP_PKEY_INDEX)
- context->pri_path.pkey_index = attr->pkey_index;
+ context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
/* todo implement counter_index functionality */
@@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_AV) {
err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
- attr_mask, 0, attr);
+ attr_mask, 0, attr, false);
if (err)
goto out;
}
@@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_ALT_PATH) {
err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
&context->alt_path,
- attr->alt_port_num, attr_mask, 0, attr);
+ attr->alt_port_num,
+ attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+ 0, attr, true);
if (err)
goto out;
}
@@ -4013,11 +4019,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
- qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
+ qp_attr->alt_pkey_index =
+ be16_to_cpu(context->alt_path.pkey_index);
qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
}
- qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
+ qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
qp_attr->port_num = context->pri_path.port;
/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
@@ -4079,17 +4086,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_wr = qp->sq.max_post;
qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_init_attr->qp_context = ibqp->qp_context;
} else {
qp_attr->cap.max_send_wr = 0;
qp_attr->cap.max_send_sge = 0;
}
- /* We don't support inline sends for kernel QPs (yet), and we
- * don't know what userspace's value should be.
- */
- qp_attr->cap.max_inline_data = 0;
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
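Among the mlx5 QP changes above, the pkey index is now kept big-endian inside the hardware-visible path context (cpu_to_be16 on modify, be16_to_cpu on query) instead of being masked to 7 bits. A small sketch of that store/read-back pairing, with htons()/ntohs() standing in for cpu_to_be16()/be16_to_cpu() and an invented index value:

```c
#include <arpa/inet.h>	/* htons()/ntohs() as stand-ins for the kernel helpers */
#include <stdint.h>
#include <stdio.h>

/* A hardware-visible path context keeps multi-byte fields big-endian. */
struct qp_path {
	uint16_t pkey_index;	/* stored big-endian */
};

int main(void)
{
	struct qp_path path;
	uint16_t pkey_index = 0x0123;	/* illustrative index */

	/* modify-QP side: convert to the device's byte order before writing. */
	path.pkey_index = htons(pkey_index);

	/* query-QP side: convert back before handing the value to the caller. */
	printf("stored=0x%04x, read back=0x%04x\n",
	       path.pkey_index, ntohs(path.pkey_index));
	return 0;
}
```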
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 7209fbc03ccb..a0b6ebee4d8a 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -36,7 +36,6 @@
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
@@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int i;
int flags;
dma_addr_t pa;
- DEFINE_DMA_ATTRS(attrs);
-
- if (dmasync)
- dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
if (!can_do_mlock())
return -EPERM;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 5fa4d4d81ee0..7de5134bec85 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
*/
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type)
+ __releases(&qp->s_lock)
+ __releases(&qp->s_hlock)
+ __releases(&qp->r_lock)
+ __acquires(&qp->r_lock)
+ __acquires(&qp->s_hlock)
+ __acquires(&qp->s_lock)
{
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bab7db6fa9ab..4f7d9b48df64 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -94,6 +94,7 @@ enum {
IPOIB_NEIGH_TBL_FLUSH = 12,
IPOIB_FLAG_DEV_ADDR_SET = 13,
IPOIB_FLAG_DEV_ADDR_CTRL = 14,
+ IPOIB_FLAG_GOING_DOWN = 15,
IPOIB_MAX_BACKOFF_SECONDS = 16,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index b2f42835d76d..951d9abcca8b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
{
struct net_device *dev = to_net_dev(d);
int ret;
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
+ return -EPERM;
if (!rtnl_trylock())
return restart_syscall();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 45c40a17d6a6..dc6d241b9406 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
return false;
- netif_addr_lock(priv->dev);
+ netif_addr_lock_bh(priv->dev);
/* The subnet prefix may have changed, update it now so we won't have
* to do it later
@@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
search_gid.global.interface_id = priv->local_gid.global.interface_id;
- netif_addr_unlock(priv->dev);
+ netif_addr_unlock_bh(priv->dev);
err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
priv->dev, &port, &index);
- netif_addr_lock(priv->dev);
+ netif_addr_lock_bh(priv->dev);
if (search_gid.global.interface_id !=
priv->local_gid.global.interface_id)
@@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
}
out:
- netif_addr_unlock(priv->dev);
+ netif_addr_unlock_bh(priv->dev);
return ret;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2d7c16346648..5f58c41ef787 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = NULL;
goto out_unlock;
}
- neigh->alive = jiffies;
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
goto out_unlock;
}
}
@@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
struct ipoib_dev_priv *child_priv;
struct net_device *netdev = priv->dev;
- netif_addr_lock(netdev);
+ netif_addr_lock_bh(netdev);
memcpy(&priv->local_gid.global.interface_id,
&gid->global.interface_id,
@@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
- netif_addr_unlock(netdev);
+ netif_addr_unlock_bh(netdev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
down_read(&priv->vlan_rwsem);
@@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
int ret = 0;
- netif_addr_lock(dev);
+ netif_addr_lock_bh(dev);
/* Make sure the QPN, reserved and subnet prefix match the current
* lladdr, it also makes sure the lladdr is unicast.
@@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
gid->global.interface_id == 0)
ret = -EINVAL;
- netif_addr_unlock(dev);
+ netif_addr_unlock_bh(dev);
return ret;
}
@@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
ib_unregister_event_handler(&priv->event_handler);
flush_workqueue(ipoib_workqueue);
+ /* mark interface in the middle of destruction */
+ set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+
rtnl_lock();
dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
rtnl_unlock();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 82fbc9442608..d3394b6add24 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
return;
}
priv->local_lid = port_attr.lid;
- netif_addr_lock(dev);
+ netif_addr_lock_bh(dev);
if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
- netif_addr_unlock(dev);
+ netif_addr_unlock_bh(dev);
return;
}
- netif_addr_unlock(dev);
+ netif_addr_unlock_bh(dev);
spin_lock_irq(&priv->lock);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 64a35595eab8..a2f9f29c6ab5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+ return -EPERM;
+
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
priv = ipoib_intf_alloc(intf_name);
@@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+ return -EPERM;
+
if (!rtnl_trylock())
return restart_syscall();
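Editor's sketch: the IPOIB_FLAG_GOING_DOWN checks added above (in set_mode and in the vlan add/delete paths) follow a common pattern: teardown raises a "going down" bit first, and every later configuration entry point tests it and bails out with -EPERM before taking any locks. A minimal user-space analogue with a hypothetical cfg object (not the IPoIB code itself) might look like this:

#include <stdatomic.h>
#include <errno.h>
#include <stdio.h>

struct cfg {
	atomic_int going_down;          /* analogous to IPOIB_FLAG_GOING_DOWN */
	int mode;
};

/* Configuration entry point: refuse work once teardown has started. */
static int cfg_set_mode(struct cfg *c, int mode)
{
	if (atomic_load(&c->going_down))
		return -EPERM;
	c->mode = mode;
	return 0;
}

/* Teardown path: raise the flag before unwinding anything else. */
static void cfg_remove(struct cfg *c)
{
	atomic_store(&c->going_down, 1);
	/* ... proceed with the actual teardown ... */
}

int main(void)
{
	struct cfg c = { .mode = 0 };

	printf("before teardown: %d\n", cfg_set_mode(&c, 1));   /* prints 0 */
	cfg_remove(&c);
	printf("after teardown:  %d\n", cfg_set_mode(&c, 2));   /* prints -EPERM (-1) */
	return 0;
}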
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 646de170ec12..3322ed750172 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
{
unsigned int sg_offset = 0;
- state->desc = req->indirect_desc;
state->fr.next = req->fr_list;
state->fr.end = req->fr_list + ch->target->mr_per_cmd;
state->sg = scat;
@@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct scatterlist *sg;
int i;
- state->desc = req->indirect_desc;
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
@@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
target->indirect_size, DMA_TO_DEVICE);
memset(&state, 0, sizeof(state));
+ state.desc = req->indirect_desc;
if (dev->use_fast_reg)
ret = srp_map_sg_fr(&state, ch, req, scat, count);
else if (dev->use_fmr)
@@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device)
int mr_page_shift, p;
u64 max_pages_per_mr;
- srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+ srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
return;
@@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device)
IB_ACCESS_REMOTE_WRITE);
if (IS_ERR(srp_dev->global_mr))
goto err_pd;
- } else {
- srp_dev->global_mr = NULL;
}
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
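Editor's sketch: the srp change above has two parts: the shared state.desc setup moves out of the two per-path helpers into the one caller that zeroes the state, and srp_dev switches to kzalloc() so fields such as global_mr start out NULL without an explicit else branch. A rough user-space analogue, with invented names and calloc() standing in for kzalloc():

#include <stdio.h>
#include <stdlib.h>

struct map_state {
	int *desc;                      /* shared by both mapping paths */
	int ndesc;
};

static void map_fast_reg(struct map_state *s) { s->ndesc += 1; }
static void map_dma(struct map_state *s)      { s->ndesc += 2; }

/* The single caller owns the common initialisation, as srp_map_data() now does. */
static int map_data(int *indirect_desc, int use_fast_reg)
{
	struct map_state state = { 0 };

	state.desc = indirect_desc;     /* set once here, not in each helper */
	if (use_fast_reg)
		map_fast_reg(&state);
	else
		map_dma(&state);
	return state.ndesc;
}

struct dev {
	void *global_mr;                /* NULL unless explicitly created */
	int mr_page_size;
};

int main(void)
{
	int desc[4];
	/* calloc() zeroes the struct, so global_mr needs no "else = NULL". */
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	printf("ndesc=%d global_mr=%p\n", map_data(desc, 1), d->global_mr);
	free(d);
	return 0;
}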
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index d7723ce772b3..c04bc6afb965 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1274,8 +1274,6 @@ struct uvc_xu_control_mapping32 {
static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
const struct uvc_xu_control_mapping32 __user *up)
{
- struct uvc_menu_info __user *umenus;
- struct uvc_menu_info __user *kmenus;
compat_caddr_t p;
if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1292,17 +1290,7 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
if (__get_user(p, &up->menu_info))
return -EFAULT;
- umenus = compat_ptr(p);
- if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
-
- kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus));
- if (kmenus == NULL)
- return -EFAULT;
- kp->menu_info = kmenus;
-
- if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
+ kp->menu_info = compat_ptr(p);
return 0;
}
@@ -1310,10 +1298,6 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
struct uvc_xu_control_mapping32 __user *up)
{
- struct uvc_menu_info __user *umenus;
- struct uvc_menu_info __user *kmenus = kp->menu_info;
- compat_caddr_t p;
-
if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
__copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
__put_user(kp->menu_count, &up->menu_count))
@@ -1322,16 +1306,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
if (__clear_user(up->reserved, sizeof(up->reserved)))
return -EFAULT;
- if (kp->menu_count == 0)
- return 0;
-
- if (get_user(p, &up->menu_info))
- return -EFAULT;
- umenus = compat_ptr(p);
-
- if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
-
return 0;
}
@@ -1346,8 +1320,6 @@ struct uvc_xu_control_query32 {
static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
const struct uvc_xu_control_query32 __user *up)
{
- u8 __user *udata;
- u8 __user *kdata;
compat_caddr_t p;
if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1361,17 +1333,7 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
if (__get_user(p, &up->data))
return -EFAULT;
- udata = compat_ptr(p);
- if (!access_ok(VERIFY_READ, udata, kp->size))
- return -EFAULT;
-
- kdata = compat_alloc_user_space(kp->size);
- if (kdata == NULL)
- return -EFAULT;
- kp->data = kdata;
-
- if (copy_in_user(kdata, udata, kp->size))
- return -EFAULT;
+ kp->data = compat_ptr(p);
return 0;
}
@@ -1379,26 +1341,10 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
struct uvc_xu_control_query32 __user *up)
{
- u8 __user *udata;
- u8 __user *kdata = kp->data;
- compat_caddr_t p;
-
if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
__copy_to_user(up, kp, offsetof(typeof(*up), data)))
return -EFAULT;
- if (kp->size == 0)
- return 0;
-
- if (get_user(p, &up->data))
- return -EFAULT;
- udata = compat_ptr(p);
- if (!access_ok(VERIFY_READ, udata, kp->size))
- return -EFAULT;
-
- if (copy_in_user(udata, kdata, kp->size))
- return -EFAULT;
-
return 0;
}
@@ -1408,47 +1354,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
static long uvc_v4l2_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg)
{
+ struct uvc_fh *handle = file->private_data;
union {
struct uvc_xu_control_mapping xmap;
struct uvc_xu_control_query xqry;
} karg;
void __user *up = compat_ptr(arg);
- mm_segment_t old_fs;
long ret;
switch (cmd) {
case UVCIOC_CTRL_MAP32:
- cmd = UVCIOC_CTRL_MAP;
ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
+ if (ret)
+ return ret;
+ ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
+ if (ret)
+ return ret;
+ ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
+ if (ret)
+ return ret;
+
break;
case UVCIOC_CTRL_QUERY32:
- cmd = UVCIOC_CTRL_QUERY;
ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
+ if (ret)
+ return ret;
+ ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
+ if (ret)
+ return ret;
+ ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
+ if (ret)
+ return ret;
break;
default:
return -ENOIOCTLCMD;
}
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = video_ioctl2(file, cmd, (unsigned long)&karg);
- set_fs(old_fs);
-
- if (ret < 0)
- return ret;
-
- switch (cmd) {
- case UVCIOC_CTRL_MAP:
- ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
- break;
-
- case UVCIOC_CTRL_QUERY:
- ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
- break;
- }
-
return ret;
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c5fe915870ad..a59d55e25d5f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12895,52 +12895,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
return rc;
}
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
{
struct bnx2x_vlan_entry *vlan;
int rc = 0;
- if (!bp->vlan_cnt) {
- DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
- return 0;
- }
-
+ /* Configure all non-configured entries */
list_for_each_entry(vlan, &bp->vlan_reg, link) {
- /* Prepare for cleanup in case of errors */
- if (rc) {
- vlan->hw = false;
- continue;
- }
-
- if (!vlan->hw)
+ if (vlan->hw)
continue;
- DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+ if (bp->vlan_cnt >= bp->vlan_credit)
+ return -ENOBUFS;
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
- BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
- vlan->hw = false;
- rc = -EINVAL;
- continue;
+ BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+ return rc;
}
+
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+ vlan->hw = true;
+ bp->vlan_cnt++;
}
- return rc;
+ return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+ bool need_accept_any_vlan;
+
+ need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+ if (bp->accept_any_vlan != need_accept_any_vlan) {
+ bp->accept_any_vlan = need_accept_any_vlan;
+ DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+ bp->accept_any_vlan ? "raised" : "cleared");
+ if (set_rx_mode) {
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+ }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* The hw forgot all entries after reload */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+ bp->vlan_cnt = 0;
+
+ /* Don't set rx mode here. Our caller will do it. */
+ bnx2x_vlan_configure(bp, false);
+
+ return 0;
}
static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
- bool hw = false;
- int rc = 0;
-
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
@@ -12948,93 +12967,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
if (!vlan)
return -ENOMEM;
- bp->vlan_cnt++;
- if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
- DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
- bp->accept_any_vlan = true;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- } else if (bp->vlan_cnt <= bp->vlan_credit) {
- rc = __bnx2x_vlan_configure_vid(bp, vid, true);
- hw = true;
- }
-
vlan->vid = vid;
- vlan->hw = hw;
+ vlan->hw = false;
+ list_add_tail(&vlan->link, &bp->vlan_reg);
- if (!rc) {
- list_add(&vlan->link, &bp->vlan_reg);
- } else {
- bp->vlan_cnt--;
- kfree(vlan);
- }
-
- DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
- return rc;
+ return 0;
}
static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
+ bool found = false;
int rc = 0;
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
-
DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
- if (!bp->vlan_cnt) {
- BNX2X_ERR("Unable to kill VLAN %d\n", vid);
- return -EINVAL;
- }
-
list_for_each_entry(vlan, &bp->vlan_reg, link)
- if (vlan->vid == vid)
+ if (vlan->vid == vid) {
+ found = true;
break;
+ }
- if (vlan->vid != vid) {
+ if (!found) {
BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
return -EINVAL;
}
- if (vlan->hw)
+ if (netif_running(dev) && vlan->hw) {
rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+ DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+ bp->vlan_cnt--;
+ }
list_del(&vlan->link);
kfree(vlan);
- bp->vlan_cnt--;
-
- if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
- /* Configure all non-configured entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link) {
- if (vlan->hw)
- continue;
-
- rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
- if (rc) {
- BNX2X_ERR("Unable to config VLAN %d\n",
- vlan->vid);
- continue;
- }
- DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
- vlan->vid);
- vlan->hw = true;
- }
- DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
- bp->accept_any_vlan = false;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- }
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
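Editor's sketch: after the rework above, add and kill only maintain the software VLAN list, and a single configure pass programs as many entries as the hardware credit allows, deriving accept_any_vlan from whether it ran out. A compact stand-alone model of that flow (hypothetical types, not the driver code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_VLANS 8

struct vlan_entry { int vid; bool hw; };

struct nic {
	struct vlan_entry reg[MAX_VLANS];
	int nreg;               /* entries in the software list */
	int hw_cnt;             /* entries programmed into hardware */
	int credit;             /* hardware filter slots available */
	bool accept_any_vlan;
};

/* Program every not-yet-programmed entry until the credit runs out. */
static int vlan_configure_vid_list(struct nic *bp)
{
	for (int i = 0; i < bp->nreg; i++) {
		if (bp->reg[i].hw)
			continue;
		if (bp->hw_cnt >= bp->credit)
			return -ENOBUFS;
		bp->reg[i].hw = true;   /* stands in for the firmware call */
		bp->hw_cnt++;
	}
	return 0;
}

static void vlan_configure(struct nic *bp)
{
	bool need_accept_any = vlan_configure_vid_list(bp) != 0;

	if (bp->accept_any_vlan != need_accept_any) {
		bp->accept_any_vlan = need_accept_any;
		printf("accept-any-vlan %s\n",
		       need_accept_any ? "raised" : "cleared");
	}
}

int main(void)
{
	struct nic bp = { .credit = 2 };

	for (int vid = 10; vid < 14; vid++) {      /* add four VLANs */
		bp.reg[bp.nreg++] = (struct vlan_entry){ .vid = vid };
		vlan_configure(&bp);
	}
	/* With a credit of 2, the third add raises accept-any-vlan. */
	printf("hw_cnt=%d accept_any=%d\n", bp.hw_cnt, bp.accept_any_vlan);
	return 0;
}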
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 72a2efff8e49..c777cde85ce4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -286,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
txr->tx_prod = prod;
+ tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
+ wmb(); /* Sync is_push and byte queue before pushing data */
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
@@ -298,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len);
}
- tx_buf->is_push = 1;
goto tx_done;
}
@@ -1112,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
- netdev_features_t features = skb->dev->features;
+ if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD)) {
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- tpa_info->metadata &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
- }
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@@ -1277,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
skb->protocol = eth_type_trans(skb, dev);
- if (rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
- netdev_features_t features = skb->dev->features;
+ if ((rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD))
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- meta_data &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@@ -5466,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (!bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
+
+ /* Both CTAG and STAG VLAN acceleration on the RX side have to be

+ * turned on or off together.
+ */
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ else
+ features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ }
+
return features;
}
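Editor's sketch: the bnxt_fix_features() hunk above keeps the two RX VLAN-strip flags in lockstep because the hardware strips both tag types with one switch, resolving a mismatched request from the current CTAG state. The same fix-up, reduced to a stand-alone function with made-up flag values:

#include <stdio.h>

#define F_VLAN_CTAG_RX (1u << 0)
#define F_VLAN_STAG_RX (1u << 1)
#define F_BOTH         (F_VLAN_CTAG_RX | F_VLAN_STAG_RX)

/* If the requested mask doesn't have both flags set, follow the current
 * CTAG state: either drop the pair or raise the pair together. */
static unsigned int fix_features(unsigned int current, unsigned int wanted)
{
	if ((wanted & F_BOTH) != F_BOTH) {
		if (current & F_VLAN_CTAG_RX)
			wanted &= ~F_BOTH;
		else
			wanted |= F_BOTH;
	}
	return wanted;
}

int main(void)
{
	/* Dropping just STAG while both are on drops the pair: prints 0. */
	printf("%#x\n", fix_features(F_BOTH, F_VLAN_CTAG_RX));
	/* Asking for just STAG while both are off raises the pair: prints 0x3. */
	printf("%#x\n", fix_features(0, F_VLAN_STAG_RX));
	return 0;
}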
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a2cdfc1261dc..50812a1d67bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 41b010645100..4edb98c3c6c7 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1195,7 +1195,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
ret = -ENOMEM;
- goto free;
+ goto free2;
}
priv->mdio->name = "ethoc-mdio";
@@ -1208,7 +1208,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free;
+ goto free2;
}
ret = ethoc_mdio_probe(netdev);
@@ -1241,9 +1241,10 @@ error2:
error:
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
-free:
+free2:
if (priv->clk)
clk_disable_unprepare(priv->clk);
+free:
free_netdev(netdev);
out:
return ret;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3c0255e98535..fea0f330ddbd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2416,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
- pr_err("Rx coalesced frames exceed hardware limiation");
+ pr_err("Rx coalesced frames exceed hardware limitation\n");
return -EINVAL;
}
if (ec->tx_max_coalesced_frames > 255) {
- pr_err("Tx coalesced frame exceed hardware limiation");
+ pr_err("Tx coalesced frame exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7615e0668acb..2e6785b6e8be 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_ring_size);
if (likely(!nr_frags)) {
- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (likely(!do_tstamp))
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
u32 lstatus_start = lstatus;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c984462fad2a..4763252bbf85 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 flowctrl;
u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (mac->phy_dev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex)
+ if (mac->phy_dev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
- mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+ if (mac->phy_dev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (mac->phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ mcr |= MAC_MCR_FORCE_TX_FC;
+ if (flowctrl & FLOW_CTRL_RX)
+ mcr |= MAC_MCR_FORCE_RX_FC;
+
+ netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+ flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+ flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+ }
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
@@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
u32 val, ge_mode;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+ if (!np && of_phy_is_fixed_link(mac->of_node))
+ if (!of_phy_register_fixed_link(mac->of_node))
+ np = of_node_get(mac->of_node);
if (!np)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
ge_mode = 0;
break;
@@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
- mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+ mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
@@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
return 0;
err_free_bus:
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
err_put_node:
of_node_put(mii_np);
@@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
mdiobus_unregister(eth->mii_bus);
of_node_put(eth->mii_bus->dev.of_node);
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
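Editor's sketch: the mtk change above stops forcing both pause directions whenever the link partner advertises pause; instead it resolves TX and RX flow control from both sides' advertisements via mii_resolve_flowctrl_fdx() (from linux/mii.h). A stand-alone sketch of that standard full-duplex resolution, with illustrative bit values rather than the real MII register constants:

#include <stdio.h>

#define PAUSE_CAP  (1u << 10)   /* symmetric pause advertised */
#define PAUSE_ASYM (1u << 11)   /* asymmetric pause advertised */
#define FC_TX      (1u << 0)
#define FC_RX      (1u << 1)

/* IEEE 802.3 Annex 28B style resolution for a full-duplex link. */
static unsigned int resolve_flowctrl_fdx(unsigned int lcl, unsigned int rmt)
{
	unsigned int cap = 0;

	if (lcl & rmt & PAUSE_CAP) {
		cap = FC_TX | FC_RX;
	} else if (lcl & rmt & PAUSE_ASYM) {
		if (lcl & PAUSE_CAP)
			cap = FC_RX;        /* we honour received pause only */
		else if (rmt & PAUSE_CAP)
			cap = FC_TX;        /* we send pause frames only */
	}
	return cap;
}

int main(void)
{
	/* Both sides symmetric -> TX and RX pause (prints 3). */
	printf("%u\n", resolve_flowctrl_fdx(PAUSE_CAP, PAUSE_CAP | PAUSE_ASYM));
	/* We advertise asym only, partner is symmetric -> TX pause only (prints 1). */
	printf("%u\n", resolve_flowctrl_fdx(PAUSE_ASYM, PAUSE_CAP | PAUSE_ASYM));
	return 0;
}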
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index fd4392999eee..f5c8d5db25a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3192,10 +3192,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
flush_workqueue(priv->wq);
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
netif_device_detach(netdev);
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_close_locked(netdev);
- mutex_unlock(&priv->state_lock);
+ mlx5e_close(netdev);
} else {
unregister_netdev(netdev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 229ab16fb8d3..b000ddc29553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -317,7 +317,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
- sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+ if (bf)
+ sq->bf_budget--;
sq->stats.packets++;
sq->stats.bytes += num_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index b84a6918a700..aebbd6ccb9fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
match_v,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
- if (IS_ERR_OR_NULL(flow_rule)) {
+ if (IS_ERR(flow_rule)) {
pr_warn(
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
@@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
- if (IS_ERR_OR_NULL(fdb)) {
+ if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
@@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
eth_broadcast_addr(dmac);
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
@@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
eth_zero_addr(dmac);
dmac[0] = 0x01;
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
goto out;
@@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
goto out;
@@ -529,7 +529,7 @@ out:
}
}
- kfree(flow_group_in);
+ kvfree(flow_group_in);
return err;
}
@@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
esw_fdb_set_vport_rule(esw,
mac,
vport_idx);
+ iter_vaddr->mc_promisc = true;
break;
case MLX5_ACTION_DEL:
if (!iter_vaddr)
@@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
return;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR_OR_NULL(acl)) {
+ if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
vport->vport, err);
@@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(vlan_grp)) {
+ if (IS_ERR(vlan_grp)) {
err = PTR_ERR(vlan_grp);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
vport->vport, err);
@@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
drop_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(drop_grp)) {
+ if (IS_ERR(drop_grp)) {
err = PTR_ERR(drop_grp);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
vport->vport, err);
@@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.drop_grp = drop_grp;
vport->egress.allowed_vlans_grp = vlan_grp;
out:
- kfree(flow_group_in);
+ kvfree(flow_group_in);
if (err && !IS_ERR_OR_NULL(vlan_grp))
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
@@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
return;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR_OR_NULL(acl)) {
+ if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
vport->vport, err);
@@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
@@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
vport->vport, err);
@@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
vport->vport, err);
@@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
vport->vport, err);
@@ -1259,7 +1260,7 @@ out:
mlx5_destroy_flow_table(vport->ingress.acl);
}
- kfree(flow_group_in);
+ kvfree(flow_group_in);
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
match_v,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
- if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+ if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
vport->vport, err);
@@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
match_v,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
- if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+ if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
@@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
match_v,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
- if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+ if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
vport->vport, err);
@@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
match_v,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
- if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+ if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
vport->vport, err);
@@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
/* Sync with current vport context */
vport->enabled_events = enable_events;
- esw_vport_change_handle_locked(vport);
-
vport->enabled = true;
/* only PF is trusted by default */
vport->trusted = (vport_num) ? false : true;
-
- arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+ esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
@@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+ ((u8 *)node_guid)[7] = mac[0];
+ ((u8 *)node_guid)[6] = mac[1];
+ ((u8 *)node_guid)[5] = mac[2];
+ ((u8 *)node_guid)[4] = 0xff;
+ ((u8 *)node_guid)[3] = 0xfe;
+ ((u8 *)node_guid)[2] = mac[3];
+ ((u8 *)node_guid)[1] = mac[4];
+ ((u8 *)node_guid)[0] = mac[5];
+}
+
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
- int err = 0;
struct mlx5_vport *evport;
+ u64 node_guid;
+ int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
@@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err;
}
+ node_guid_gen_from_mac(&node_guid, mac);
+ err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+ if (err)
+ mlx5_core_warn(esw->dev,
+ "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+ vport, err);
+
mutex_lock(&esw->state_lock);
if (evport->enabled)
err = esw_vport_ingress_config(esw, evport);
mutex_unlock(&esw->state_lock);
-
return err;
}
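Editor's sketch: node_guid_gen_from_mac() above builds an EUI-64 style node GUID by inserting ff:fe between the two halves of the MAC (note it does not flip the locally-administered bit). The in-kernel helper stores the octets directly into the u64; on a little-endian host that yields the same 64-bit value as the shift form below, which is shown stand-alone for clarity:

#include <stdint.h>
#include <stdio.h>

/* 00:11:22:33:44:55 -> 0x001122fffe334455 */
static uint64_t node_guid_from_mac(const uint8_t mac[6])
{
	return ((uint64_t)mac[0] << 56) |
	       ((uint64_t)mac[1] << 48) |
	       ((uint64_t)mac[2] << 40) |
	       ((uint64_t)0xff   << 32) |
	       ((uint64_t)0xfe   << 24) |
	       ((uint64_t)mac[3] << 16) |
	       ((uint64_t)mac[4] <<  8) |
	        (uint64_t)mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("node guid: 0x%016llx\n",
	       (unsigned long long)node_guid_from_mac(mac));
	return 0;
}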
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8b5f0b2c0d5c..e912a3d2505e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1292,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
ft->id);
return err;
}
- root->root_ft = new_root_ft;
}
+ root->root_ft = new_root_ft;
return 0;
}
@@ -1767,6 +1767,9 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev)
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
cleanup_root_ns(dev);
cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
@@ -1828,29 +1831,36 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
{
int err = 0;
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
err = mlx5_init_fc_stats(dev);
if (err)
return err;
- if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
err = init_root_ns(dev);
if (err)
goto err;
}
+
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = init_fdb_root_ns(dev);
- if (err)
- goto err;
- }
- if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acl_root_ns(dev);
- if (err)
- goto err;
- }
- if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acl_root_ns(dev);
- if (err)
- goto err;
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+ err = init_fdb_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+ err = init_egress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+ err = init_ingress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index b720a274220d..b82d65802d96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
else
- *xrcdn = be32_to_cpu(out.xrcdn);
+ *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index b69dadcfb897..daf44cd4c566 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -508,6 +508,44 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_context;
+ u8 *guid;
+ void *in;
+ int err;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EACCES;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.node_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
+ node_guid);
+ MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4a7273771028..6f9e3ddff4a8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -247,15 +247,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 swid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+ swid);
+}
+
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
@@ -305,9 +313,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
-static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width, u8 *p_lane)
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 *p_module,
+ u8 *p_width, u8 *p_lane)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
@@ -322,16 +330,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width)
-{
- u8 lane;
-
- return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
- p_width, &lane);
-}
-
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 module, u8 width, u8 lane)
{
@@ -949,17 +947,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
size_t len)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- u8 module, width, lane;
+ u8 module = mlxsw_sp_port->mapping.module;
+ u8 width = mlxsw_sp_port->mapping.width;
+ u8 lane = mlxsw_sp_port->mapping.lane;
int err;
- err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port,
- &module, &width, &lane);
- if (err) {
- netdev_err(dev, "Failed to retrieve module information\n");
- return err;
- }
-
if (!mlxsw_sp_port->split)
err = snprintf(name, len, "p%d", module + 1);
else
@@ -1681,8 +1673,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
@@ -1697,6 +1689,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->split = split;
+ mlxsw_sp_port->mapping.module = module;
+ mlxsw_sp_port->mapping.width = width;
+ mlxsw_sp_port->mapping.lane = lane;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -1839,28 +1834,6 @@ err_port_active_vlans_alloc:
return err;
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
-{
- int err;
-
- err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
- lane);
- if (err)
- return err;
-
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
- width);
- if (err)
- goto err_port_create;
-
- return 0;
-
-err_port_create:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
- return err;
-}
-
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
@@ -1909,8 +1882,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
+ u8 module, width, lane;
size_t alloc_size;
- u8 module, width;
int i;
int err;
@@ -1921,13 +1894,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
- &width);
+ &width, &lane);
if (err)
goto err_port_module_info_get;
if (!width)
continue;
mlxsw_sp->port_to_module[i] = module;
- err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+ lane);
if (err)
goto err_port_create;
}
@@ -1948,12 +1922,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
return local_port - offset;
}
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+ u8 module, unsigned int count)
+{
+ u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ int err, i;
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+ width, i * width);
+ if (err)
+ goto err_port_module_map;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+ if (err)
+ goto err_port_swid_set;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+ module, width, i * width);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ i = count;
+err_port_swid_set:
+ for (i--; i >= 0; i--)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ i = count;
+err_port_module_map:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+ return err;
+}
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+ u8 base_port, unsigned int count)
+{
+ u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+ int i;
+
+ /* Split by four means we need to re-create two ports, otherwise
+ * only one.
+ */
+ count = count / 2;
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+ 0);
+ }
+
+ for (i = 0; i < count; i++)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+ width, 0);
+ }
+}
+
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
u8 module, cur_width, base_port;
int i;
int err;
@@ -1965,18 +2012,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
+ module = mlxsw_sp_port->mapping.module;
+ cur_width = mlxsw_sp_port->mapping.width;
+
if (count != 2 && count != 4) {
netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
-
if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
return -EINVAL;
@@ -2001,25 +2044,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
- module, width, i * width);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
- goto err_port_create;
- }
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+ goto err_port_split_create;
}
return 0;
-err_port_create:
- for (i--; i >= 0; i--)
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
- }
+err_port_split_create:
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return err;
}
@@ -2027,10 +2061,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 module, cur_width, base_port;
+ u8 cur_width, base_port;
unsigned int count;
int i;
- int err;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
@@ -2044,12 +2077,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
+ cur_width = mlxsw_sp_port->mapping.width;
count = cur_width == 1 ? 4 : 2;
base_port = mlxsw_sp_cluster_base_port_get(local_port);
@@ -2061,14 +2089,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH,
- 0);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
- }
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return 0;
}
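Editor's sketch: mlxsw_sp_port_split_create() above runs three loops in sequence and unwinds them stage by stage on failure: the partially completed stage is rolled back from i-1 downwards, then i is reset to count so each fully completed earlier stage rolls back in full. A self-contained illustration of that goto-unwind pattern, with hypothetical stage names:

#include <stdio.h>

#define COUNT 4

static int stage1(int i) { printf("map %d\n", i);    return 0; }
static int stage2(int i) { printf("swid %d\n", i);   return i == 2 ? -1 : 0; }
static int stage3(int i) { printf("create %d\n", i); return 0; }

static void undo1(int i) { printf("unmap %d\n", i); }
static void undo2(int i) { printf("unswid %d\n", i); }
static void undo3(int i) { printf("remove %d\n", i); }

static int split_create(void)
{
	int err, i;

	for (i = 0; i < COUNT; i++) {
		err = stage1(i);
		if (err)
			goto err_stage1;
	}
	for (i = 0; i < COUNT; i++) {
		err = stage2(i);
		if (err)
			goto err_stage2;
	}
	for (i = 0; i < COUNT; i++) {
		err = stage3(i);
		if (err)
			goto err_stage3;
	}
	return 0;

err_stage3:
	for (i--; i >= 0; i--)
		undo3(i);
	i = COUNT;              /* stage 2 completed fully, unwind all of it */
err_stage2:
	for (i--; i >= 0; i--)
		undo2(i);
	i = COUNT;              /* stage 1 completed fully, unwind all of it */
err_stage1:
	for (i--; i >= 0; i--)
		undo1(i);
	return err;
}

int main(void)
{
	/* stage 2 fails at index 2: undo2(1..0), then undo1(3..0). */
	printf("result: %d\n", split_create());
	return 0;
}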
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index e2c022d3e2f3..13b30eaa13d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -229,6 +229,11 @@ struct mlxsw_sp_port {
struct ieee_maxrate *maxrate;
struct ieee_pfc *pfc;
} dcb;
+ struct {
+ u8 module;
+ u8 width;
+ u8 lane;
+ } mapping;
/* 802.1Q bridge VLANs */
unsigned long *active_vlans;
unsigned long *untagged_vlans;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 753064679bde..61cc6869fa65 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1105,6 +1105,39 @@ static int qed_get_port_type(u32 media_type)
return port_type;
}
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *link_caps)
+{
+ void *p;
+
+ if (!IS_PF(hwfn->cdev)) {
+ qed_vf_get_link_params(hwfn, params);
+ qed_vf_get_link_state(hwfn, link);
+ qed_vf_get_link_caps(hwfn, link_caps);
+
+ return 0;
+ }
+
+ p = qed_mcp_get_link_params(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(params, p, sizeof(*params));
+
+ p = qed_mcp_get_link_state(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link, p, sizeof(*link));
+
+ p = qed_mcp_get_link_capabilities(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link_caps, p, sizeof(*link_caps));
+
+ return 0;
+}
+
static void qed_fill_link(struct qed_hwfn *hwfn,
struct qed_link_output *if_link)
{
@@ -1116,15 +1149,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
- if (IS_PF(hwfn->cdev)) {
- memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
- sizeof(link_caps));
- } else {
- qed_vf_get_link_params(hwfn, &params);
- qed_vf_get_link_state(hwfn, &link);
- qed_vf_get_link_caps(hwfn, &link_caps);
+ if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+ dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+ return;
}
/* Set the link parameters to pass to protocol driver */
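Editor's sketch: the new qed_get_link_data() helper above refuses to memcpy() from an MCP source that has not been populated yet and returns -ENXIO so the caller can warn and bail instead of dereferencing NULL. A stripped-down user-space version of that defensive pattern, with invented struct and getter names:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct link_params { int speed; };
struct link_state  { int up; };

/* Sources that may legitimately be NULL early in device bring-up. */
static struct link_params *get_params(int ready)
{
	static struct link_params p = { .speed = 10000 };

	return ready ? &p : NULL;
}

static struct link_state *get_state(int ready)
{
	static struct link_state s = { .up = 1 };

	return ready ? &s : NULL;
}

static int get_link_data(int ready, struct link_params *params,
			 struct link_state *state)
{
	void *p;

	p = get_params(ready);
	if (!p)
		return -ENXIO;          /* never memcpy() from a NULL source */
	memcpy(params, p, sizeof(*params));

	p = get_state(ready);
	if (!p)
		return -ENXIO;
	memcpy(state, p, sizeof(*state));
	return 0;
}

int main(void)
{
	struct link_params params;
	struct link_state state;

	if (get_link_data(0, &params, &state))
		fprintf(stderr, "no link data available\n");
	if (!get_link_data(1, &params, &state))
		printf("speed=%d up=%d\n", params.speed, state.up);
	return 0;
}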
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index c8667c65e685..c90b2b6ad969 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -12,11 +12,13 @@
#include "qed_vf.h"
#define QED_VF_ARRAY_LENGTH (3)
+#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
-#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
+#define IS_VF(cdev) (0)
+#define IS_PF(cdev) (1)
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 5d00d1404bfc..5733d1888223 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -87,7 +87,9 @@ static const struct pci_device_id qede_pci_tbl[] = {
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7f295c4d7b80..2a9228a6e4a0 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
- result |= SUPPORTED_FIBRE;
- break;
-
case MC_CMD_MEDIA_QSFP_PLUS:
result |= SUPPORTED_FIBRE;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
result |= SUPPORTED_40000baseCR4_Full;
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 4f7283d05588..44da877d2483 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -156,7 +156,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
struct netdev_hw_addr *ha;
netdev_for_each_uc_addr(ha, dev) {
- dwmac4_set_umac_addr(ioaddr, ha->addr, reg);
+ dwmac4_set_umac_addr(hw, ha->addr, reg);
reg++;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index eac45d0c75e2..a473c182c91d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3450,8 +3450,6 @@ int stmmac_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
- spin_lock_irqsave(&priv->lock, flags);
-
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
@@ -3459,7 +3457,9 @@ int stmmac_resume(struct device *dev)
* from another devices (e.g. serial console).
*/
if (device_may_wakeup(priv->device)) {
+ spin_lock_irqsave(&priv->lock, flags);
priv->hw->mac->pmt(priv->hw, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
@@ -3473,6 +3473,8 @@ int stmmac_resume(struct device *dev)
netif_device_attach(ndev);
+ spin_lock_irqsave(&priv->lock, flags);
+
priv->cur_rx = 0;
priv->dirty_rx = 0;
priv->dirty_tx = 0;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4b08a2f52b3e..e6bb0ecb12c7 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1339,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (priv->coal_intvl != 0) {
struct ethtool_coalesce coal;
- coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ coal.rx_coalesce_usecs = priv->coal_intvl;
cpsw_set_coalesce(ndev, &coal);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index db8022ae415b..08885bc8d6db 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
segCnt = rcdlro->segCnt;
- BUG_ON(segCnt <= 1);
+ WARN_ON_ONCE(segCnt == 0);
mss = rcdlro->mss;
if (unlikely(segCnt <= 1))
segCnt = 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c4825392d64b..3d2b64e63408 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040800
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index d0631b6cfd53..62f475e31077 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2540,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac, struct station_info *sinfo)
{
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_scb_val_le scb_val;
s32 err = 0;
struct brcmf_sta_info_le sta_info_le;
u32 sta_flags;
u32 is_tdls_peer;
s32 total_rssi;
s32 count_rssi;
+ int rssi;
u32 i;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
@@ -2629,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
total_rssi /= count_rssi;
sinfo->signal = total_rssi;
+ } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state)) {
+ memset(&scb_val, 0, sizeof(scb_val));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+ &scb_val, sizeof(scb_val));
+ if (err) {
+ brcmf_err("Could not get rssi (%d)\n", err);
+ goto done;
+ } else {
+ rssi = le32_to_cpu(scb_val.val);
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = rssi;
+ brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+ }
}
}
done:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 68f1ce02f4bf..2b9a2bc429d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
brcmu_pkt_buf_free_skb(skb);
return;
}
+
+ skb->protocol = eth_type_trans(skb, ifp->ndev);
brcmf_netif_rx(ifp, skb);
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9ed0ed1bf514..4dd5adcdd29b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2776,6 +2776,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
!info->attrs[HWSIM_ATTR_FLAGS] ||
!info->attrs[HWSIM_ATTR_COOKIE] ||
+ !info->attrs[HWSIM_ATTR_SIGNAL] ||
!info->attrs[HWSIM_ATTR_TX_INFO])
goto out;
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 0f48048b8654..3a0faa8fe9d4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m);
void rtl_addr_delay(u32 addr)
{
if (addr == 0xfe)
- msleep(50);
+ mdelay(50);
else if (addr == 0xfd)
msleep(5);
else if (addr == 0xfc)
@@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
rtl_addr_delay(addr);
} else {
rtl_set_rfreg(hw, rfpath, addr, mask, data);
- usleep_range(1, 2);
+ udelay(1);
}
}
EXPORT_SYMBOL(rtl_rfreg_delay);
@@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
rtl_addr_delay(addr);
} else {
rtl_set_bbreg(hw, addr, MASKDWORD, data);
- usleep_range(1, 2);
+ udelay(1);
}
}
EXPORT_SYMBOL(rtl_bb_delay);
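
The three changes above swap sleeping delays for busy-wait delays, presumably because these register-write helpers can be reached from atomic context, where sleeping is not allowed. A minimal sketch of that rule (example_settle is a hypothetical helper, not rtlwifi code):

#include <linux/delay.h>
#include <linux/types.h>

static void example_settle(bool may_sleep)
{
	if (may_sleep)
		usleep_range(1000, 2000);	/* process context only, may sleep */
	else
		mdelay(1);			/* busy-wait, safe in atomic context */
}
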
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 14f2f8c7c260..33daffc4392c 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -395,7 +395,7 @@ static int unflatten_dt_nodes(const void *blob,
struct device_node **nodepp)
{
struct device_node *root;
- int offset = 0, depth = 0;
+ int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH 64
unsigned int fpsizes[FDT_MAX_DEPTH];
struct device_node *nps[FDT_MAX_DEPTH];
@@ -405,11 +405,22 @@ static int unflatten_dt_nodes(const void *blob,
if (nodepp)
*nodepp = NULL;
+ /*
+ * We're unflattening the device sub-tree if @dad is valid. There are
+ * possibly multiple nodes in the first level of depth. We need to
+ * set @depth to 1 to make fdt_next_node() happy, as it bails out
+ * immediately when a negative @depth is found. Otherwise, the device
+ * nodes except the first one won't be unflattened successfully.
+ */
+ if (dad)
+ depth = initial_depth = 1;
+
root = dad;
fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
nps[depth] = dad;
+
for (offset = 0;
- offset >= 0 && depth >= 0;
+ offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
continue;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index e7bfc175b8e1..6ec743faabe8 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
EXPORT_SYMBOL_GPL(of_irq_to_resource);
/**
- * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
* @dev: pointer to device tree node
- * @index: zero-based index of the irq
- *
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created.
+ * @index: zero-based index of the IRQ
*
+ * Returns the Linux IRQ number on success, or 0 if the IRQ mapping failed, or
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code in case
+ * of any other failure.
*/
int of_irq_get(struct device_node *dev, int index)
{
@@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index)
EXPORT_SYMBOL_GPL(of_irq_get);
/**
- * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
* @dev: pointer to device tree node
- * @name: irq name
+ * @name: IRQ name
*
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created, or error code in case of any other failure.
+ * Returns the Linux IRQ number on success, or 0 if the IRQ mapping failed, or
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code in case
+ * of any other failure.
*/
int of_irq_get_byname(struct device_node *dev, const char *name)
{
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index ed01c0172e4a..216648233874 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -127,8 +127,15 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
}
/* Need to adjust the alignment to satisfy the CMA requirement */
- if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
- align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+ if (IS_ENABLED(CONFIG_CMA)
+ && of_flat_dt_is_compatible(node, "shared-dma-pool")
+ && of_get_flat_dt_prop(node, "reusable", NULL)
+ && !of_get_flat_dt_prop(node, "no-map", NULL)) {
+ unsigned long order =
+ max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+
+ align = max(align, (phys_addr_t)PAGE_SIZE << order);
+ }
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
if (prop) {
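
For a sense of scale, with assumed typical values (4 KiB pages, MAX_ORDER = 11, pageblock_order = 9; none of these come from the patch) the CMA alignment bump that now applies only to reusable, mappable "shared-dma-pool" nodes works out to 4 MiB:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;		/* PAGE_SIZE, assumed */
	unsigned long max_order = 11;		/* MAX_ORDER, assumed */
	unsigned long pageblock_order = 9;	/* assumed, e.g. HUGETLB_PAGE_ORDER on x86 */
	unsigned long order = max_order - 1 > pageblock_order ?
			      max_order - 1 : pageblock_order;

	/* prints "align >= 4 MiB" */
	printf("align >= %lu MiB\n", (page_size << order) >> 20);
	return 0;
}
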
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 3408578b08d6..ff41c310c900 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -230,6 +230,7 @@ static struct {
{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
{"Promise", "", NULL, BLIST_SPARSELUN},
+ {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
{"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f459dff30512..60bff78e9ead 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2867,10 +2867,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->opt_xfer_blocks &&
sdkp->opt_xfer_blocks <= dev_max &&
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
- sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
- rw_max = q->limits.io_opt =
- sdkp->opt_xfer_blocks * sdp->sector_size;
- else
+ logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
+ q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+ rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+ } else
rw_max = BLK_DEF_MAX_SECTORS;
/* Combine with controller limits */
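
The rework above matters because q->limits.io_opt is expressed in bytes while rw_max feeds a limit expressed in 512-byte sectors; the deleted line assigned the byte count to both. A worked example with hypothetical values (1024-block optimal transfer length on a disk with 4 KiB logical blocks):

#include <stdio.h>

int main(void)
{
	unsigned long sector_size = 4096;	/* logical block size in bytes */
	unsigned long opt_xfer_blocks = 1024;	/* optimal transfer length in blocks */

	unsigned long io_opt_bytes = opt_xfer_blocks * sector_size;
	unsigned long rw_max_sectors = opt_xfer_blocks << (12 - 9);	/* ilog2(4096) = 12 */

	/* io_opt = 4194304 bytes, rw_max = 8192 sectors: the same 4 MiB */
	printf("io_opt = %lu bytes, rw_max = %lu sectors\n",
	       io_opt_bytes, rw_max_sectors);
	return 0;
}
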
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 654630bb7d0e..765a6f1ac1b7 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
return blocks << (ilog2(sdev->sector_size) - 9);
}
+static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
+{
+ return blocks * sdev->sector_size;
+}
+
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined: