Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c           |    3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c            |  167
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c                   |   16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c                 |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h                 |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c                   |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c                   |    6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c     |    4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c             |  192
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c              |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h              |    6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c      |    5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c         |   24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c           |   26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c              |    9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c          |  224
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c            |  571
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h       |   26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c              |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c       |   76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c            |   35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c              |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c             |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c        |    4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig               |   11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile              |    3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/cmd.h                 |   51
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c                |    7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h                |    5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/item.h                |   50
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c                 |   63
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.h                 |    5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h                 | 1263
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c            | 1949
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h            |  122
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c    |  422
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c  |  903
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c            |   41
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/txheader.h            |    1
39 files changed, 5757 insertions, 543 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f79d8124321e..ddb5541882f5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -95,9 +95,6 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
(u16) (mdev->dev->caps.fw_ver & 0xffff));
strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4726122ea76b..886e1bc86374 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -573,10 +573,8 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
- struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
- u64 reg_id = 0;
int *qpn = &priv->base_qpn;
u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
@@ -600,44 +598,11 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
if (err) {
en_err(priv, "Failed to reserve qp for mac registration\n");
- goto qp_err;
- }
-
- err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
- if (err)
- goto steer_err;
-
- err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
- &priv->tunnel_reg_id);
- if (err)
- goto tunnel_err;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- err = -ENOMEM;
- goto alloc_err;
+ mlx4_unregister_mac(dev, priv->port, mac);
+ return err;
}
- memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
- memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
- entry->reg_id = reg_id;
-
- hlist_add_head_rcu(&entry->hlist,
- &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
return 0;
-
-alloc_err:
- if (priv->tunnel_reg_id)
- mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
-tunnel_err:
- mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
-
-steer_err:
- mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
- mlx4_unregister_mac(dev, priv->port, mac);
- return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
@@ -645,39 +610,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int qpn = priv->base_qpn;
- u64 mac;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
} else {
- struct mlx4_mac_entry *entry;
- struct hlist_node *tmp;
- struct hlist_head *bucket;
- unsigned int i;
-
- for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
- bucket = &priv->mac_hash[i];
- hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_mac_to_u64(entry->mac);
- en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
- entry->mac);
- mlx4_en_uc_steer_release(priv, entry->mac,
- qpn, entry->reg_id);
-
- mlx4_unregister_mac(dev, priv->port, mac);
- hlist_del_rcu(&entry->hlist);
- kfree_rcu(entry, rcu);
- }
- }
-
- if (priv->tunnel_reg_id) {
- mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
- priv->tunnel_reg_id = 0;
- }
-
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
priv->port, qpn);
mlx4_qp_release_range(dev, qpn, 1);
@@ -1283,6 +1222,75 @@ static void mlx4_en_netpoll(struct net_device *dev)
}
#endif
+static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+ u64 reg_id;
+ int err = 0;
+ int *qpn = &priv->base_qpn;
+ struct mlx4_mac_entry *entry;
+
+ err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+ if (err)
+ return err;
+
+ err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+ &priv->tunnel_reg_id);
+ if (err)
+ goto tunnel_err;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+ memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
+ entry->reg_id = reg_id;
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+ return 0;
+
+alloc_err:
+ if (priv->tunnel_reg_id)
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+
+tunnel_err:
+ mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+ return err;
+}
+
+static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+ u64 mac;
+ unsigned int i;
+ int qpn = priv->base_qpn;
+ struct hlist_head *bucket;
+ struct hlist_node *tmp;
+ struct mlx4_mac_entry *entry;
+
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+ bucket = &priv->mac_hash[i];
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
+ mac = mlx4_mac_to_u64(entry->mac);
+ en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
+ entry->mac);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+
+ mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ }
+ }
+
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+}
+
static void mlx4_en_tx_timeout(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1684,6 +1692,11 @@ int mlx4_en_start_port(struct net_device *dev)
goto tx_err;
}
+ /* Set Unicast and VXLAN steering rules */
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
+ mlx4_en_set_rss_steer_rules(priv))
+ mlx4_warn(mdev, "Failed setting steering rules\n");
+
/* Attach rx QP to bradcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
@@ -1831,6 +1844,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
for (i = 0; i < priv->tx_ring_num; i++)
mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+ mlx4_en_delete_rss_steer_rules(priv);
+
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
@@ -2800,7 +2816,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_priv *priv;
int i;
int err;
- u64 mac_u64;
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2892,17 +2907,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
- if (mlx4_is_slave(priv->mdev->dev)) {
- eth_hw_addr_random(dev);
- en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
- mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
- mdev->dev->caps.def_mac[priv->port] = mac_u64;
- } else {
- en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
- priv->port, dev->dev_addr);
- err = -EINVAL;
- goto out;
- }
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+ priv->port, dev->dev_addr);
+ err = -EINVAL;
+ goto out;
+ } else if (mlx4_is_slave(priv->mdev->dev) &&
+ (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
+ /* Random MAC was assigned in mlx4_slave_cap
+ * in mlx4_core module
+ */
+ dev->addr_assign_type |= NET_ADDR_RANDOM;
+ en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
}
memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
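
mlx4_en_set_rss_steer_rules() above follows the usual kernel goto-unwind idiom: each acquired resource gets a cleanup label, and a failure jumps to the label that releases everything obtained so far, in reverse order of acquisition. A self-contained userspace sketch of the same shape, with malloc() standing in for the steering calls (all names below are illustrative, not driver code):

/* Userspace sketch of the goto-unwind pattern used by
 * mlx4_en_set_rss_steer_rules(); illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

static int setup(void **steer, void **tunnel, void **entry)
{
        int err = 0;

        *steer = malloc(16);            /* stands in for mlx4_en_uc_steer_add() */
        if (!*steer)
                return -1;

        *tunnel = malloc(16);           /* stands in for mlx4_en_tunnel_steer_add() */
        if (!*tunnel) {
                err = -1;
                goto tunnel_err;
        }

        *entry = malloc(16);            /* stands in for the mac_hash entry */
        if (!*entry) {
                err = -1;
                goto alloc_err;
        }
        return 0;

alloc_err:
        free(*tunnel);                  /* undo in reverse order of acquisition */
tunnel_err:
        free(*steer);
        return err;
}

int main(void)
{
        void *steer, *tunnel, *entry;

        if (!setup(&steer, &tunnel, &entry)) {
                printf("all three resources acquired\n");
                free(entry);
                free(tunnel);
                free(steer);
        }
        return 0;
}
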
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e8ec1dec5789..f13a4d7bbf95 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2840,3 +2840,19 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
return -EOPNOTSUPP;
}
EXPORT_SYMBOL(set_phv_bit);
+
+void mlx4_replace_zero_macs(struct mlx4_dev *dev)
+{
+ int i;
+ u8 mac_addr[ETH_ALEN];
+
+ dev->port_random_macs = 0;
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ if (!dev->caps.def_mac[i] &&
+ dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
+ eth_random_addr(mac_addr);
+ dev->port_random_macs |= 1 << i;
+ dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
+ }
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
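
The port_random_macs bitmask set here is consulted later by mlx4_en_init_netdev() (see the en_netdev.c hunk above) to mark the address as NET_ADDR_RANDOM instead of failing probe on a zero burned-in MAC. A minimal userspace sketch of that bookkeeping; the MAC generation below only imitates eth_random_addr(), and every name outside the patch is illustrative:

/* Userspace sketch of the port_random_macs bookkeeping (illustrative).
 * Port numbers are 1-based, as in mlx4; def_mac[0] is unused.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

#define NUM_PORTS 2

int main(void)
{
        uint64_t def_mac[NUM_PORTS + 1] = { 0, 0, 0x0002c9000001ULL };
        unsigned int port_random_macs = 0;
        int i;

        srand(time(NULL));
        for (i = 1; i <= NUM_PORTS; ++i) {
                if (!def_mac[i]) {
                        /* imitates eth_random_addr() + mlx4_mac_to_u64() */
                        uint64_t mac = (((uint64_t)rand() << 31) ^ rand()) & 0xffffffffffffULL;

                        mac &= ~0x010000000000ULL;      /* clear multicast bit */
                        mac |= 0x020000000000ULL;       /* set locally administered bit */
                        def_mac[i] = mac;
                        port_random_macs |= 1u << i;
                }
        }

        for (i = 1; i <= NUM_PORTS; ++i)
                printf("port %d: mac %012llx %s\n", i,
                       (unsigned long long)def_mac[i],
                       (port_random_macs & (1u << i)) ? "(random)" : "(burned in)");
        return 0;
}
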
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index cc3a9897574c..85f1b1e7e505 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -863,6 +863,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENODEV;
}
+ mlx4_replace_zero_macs(dev);
+
dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 232b2b55f23b..e1cf9036af22 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1378,6 +1378,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
void mlx4_init_quotas(struct mlx4_dev *dev);
+/* for VFs, replace zero MACs with randomly-generated MACs at driver start */
+void mlx4_replace_zero_macs(struct mlx4_dev *dev);
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
/* Returns the VF index of slave */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 78f51e103880..93195191f45b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -318,7 +318,7 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
key, NULL);
} else {
mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR_OR_NULL(mailbox))
+ if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 20268634a9ab..3311f35d08e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -422,15 +422,15 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
u64 qp_mask = 0;
int err = 0;
+ if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+ return -EINVAL;
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
cmd = (struct mlx4_update_qp_context *)mailbox->buf;
- if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
- return -EINVAL;
-
if (attr & MLX4_UPDATE_QP_SMAC) {
pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
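
The reordering above avoids leaking the command mailbox: previously an invalid attr bailed out with -EINVAL after mlx4_alloc_cmd_mailbox() had already succeeded. A tiny userspace sketch of the pattern, with malloc() standing in for the mailbox allocation (illustrative only):

/* Validate-before-allocate: the leak avoided by the hunk above. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int update_leaky(unsigned int attr)
{
        void *mailbox = malloc(256);    /* stands in for mlx4_alloc_cmd_mailbox() */

        if (!mailbox)
                return -ENOMEM;
        if (!attr)
                return -EINVAL;         /* BUG: mailbox is never freed */
        free(mailbox);
        return 0;
}

static int update_fixed(unsigned int attr)
{
        void *mailbox;

        if (!attr)                      /* validate before acquiring anything */
                return -EINVAL;
        mailbox = malloc(256);
        if (!mailbox)
                return -ENOMEM;
        free(mailbox);
        return 0;
}

int main(void)
{
        printf("leaky: %d, fixed: %d\n", update_leaky(0), update_fixed(0));
        return 0;
}
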
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 731423ca575d..ac4b99ab1f85 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1238,8 +1238,10 @@ static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
return 0;
undo:
- for (--i; i >= base; --i)
+ for (--i; i >= 0; --i) {
rb_erase(&res_arr[i]->node, root);
+ list_del_init(&res_arr[i]->list);
+ }
spin_unlock_irq(mlx4_tlock(dev));
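
The rollback loop above walks array indices, so it must count down to 0; the old bound compared the index i against the first resource ID base, which skipped the cleanup entirely whenever base was nonzero, and the entries were also left linked on the tracker list. A small userspace sketch of the corrected bound (illustrative, not driver code):

/* Userspace sketch of the corrected rollback bound (illustrative). */
#include <stdio.h>

#define COUNT 4

int main(void)
{
        int res_id[COUNT];
        int base = 100;         /* first resource ID; unrelated to array indexing */
        int failed_at = 2;      /* pretend inserting element 2 failed */
        int i;

        for (i = 0; i < failed_at; i++)
                res_id[i] = base + i;

        /* correct rollback: undo array entries failed_at-1 .. 0 */
        for (i = failed_at - 1; i >= 0; --i)
                printf("erase res_arr[%d] (id %d) from the tree and the list\n",
                       i, res_id[i]);

        /* the old bound "i >= base" never ran here, since base (100) > any index */
        return 0;
}
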
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 75ff58dc1ff5..fabfc9e0a948 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -254,6 +254,156 @@ static void dump_buf(void *buf, int size, int data_only, int offset)
pr_debug("\n");
}
+enum {
+ MLX5_DRIVER_STATUS_ABORTED = 0xfe,
+ MLX5_DRIVER_SYND = 0xbadd00de,
+};
+
+static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
+ u32 *synd, u8 *status)
+{
+ *synd = 0;
+ *status = 0;
+
+ switch (op) {
+ case MLX5_CMD_OP_TEARDOWN_HCA:
+ case MLX5_CMD_OP_DISABLE_HCA:
+ case MLX5_CMD_OP_MANAGE_PAGES:
+ case MLX5_CMD_OP_DESTROY_MKEY:
+ case MLX5_CMD_OP_DESTROY_EQ:
+ case MLX5_CMD_OP_DESTROY_CQ:
+ case MLX5_CMD_OP_DESTROY_QP:
+ case MLX5_CMD_OP_DESTROY_PSV:
+ case MLX5_CMD_OP_DESTROY_SRQ:
+ case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ case MLX5_CMD_OP_DESTROY_DCT:
+ case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_DEALLOC_PD:
+ case MLX5_CMD_OP_DEALLOC_UAR:
+ case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ case MLX5_CMD_OP_DEALLOC_XRCD:
+ case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_DESTROY_TIR:
+ case MLX5_CMD_OP_DESTROY_SQ:
+ case MLX5_CMD_OP_DESTROY_RQ:
+ case MLX5_CMD_OP_DESTROY_RMP:
+ case MLX5_CMD_OP_DESTROY_TIS:
+ case MLX5_CMD_OP_DESTROY_RQT:
+ case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+ case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
+ case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+ return MLX5_CMD_STAT_OK;
+
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_INIT_HCA:
+ case MLX5_CMD_OP_ENABLE_HCA:
+ case MLX5_CMD_OP_QUERY_PAGES:
+ case MLX5_CMD_OP_SET_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_SET_ISSI:
+ case MLX5_CMD_OP_CREATE_MKEY:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+ case MLX5_CMD_OP_PAGE_FAULT_RESUME:
+ case MLX5_CMD_OP_CREATE_EQ:
+ case MLX5_CMD_OP_QUERY_EQ:
+ case MLX5_CMD_OP_GEN_EQE:
+ case MLX5_CMD_OP_CREATE_CQ:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_MODIFY_CQ:
+ case MLX5_CMD_OP_CREATE_QP:
+ case MLX5_CMD_OP_RST2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_SQD_RTS_QP:
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ case MLX5_CMD_OP_CREATE_PSV:
+ case MLX5_CMD_OP_CREATE_SRQ:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_ARM_RQ:
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_CREATE_DCT:
+ case MLX5_CMD_OP_DRAIN_DCT:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_MODIFY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OP_SET_ROCE_ADDRESS:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_ALLOC_PD:
+ case MLX5_CMD_OP_ALLOC_UAR:
+ case MLX5_CMD_OP_CONFIG_INT_MODERATION:
+ case MLX5_CMD_OP_ACCESS_REG:
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+ case MLX5_CMD_OP_MAD_IFC:
+ case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+ case MLX5_CMD_OP_SET_MAD_DEMUX:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_MODIFY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_CREATE_TIR:
+ case MLX5_CMD_OP_MODIFY_TIR:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_CREATE_SQ:
+ case MLX5_CMD_OP_MODIFY_SQ:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_MODIFY_RQ:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_CREATE_RMP:
+ case MLX5_CMD_OP_MODIFY_RMP:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_CREATE_TIS:
+ case MLX5_CMD_OP_MODIFY_TIS:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_CREATE_RQT:
+ case MLX5_CMD_OP_MODIFY_RQT:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ *status = MLX5_DRIVER_STATUS_ABORTED;
+ *synd = MLX5_DRIVER_SYND;
+ return -EIO;
+ default:
+ mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
+ return -EINVAL;
+ }
+}
+
const char *mlx5_command_str(int command)
{
switch (command) {
@@ -473,6 +623,7 @@ static void cmd_work_handler(struct work_struct *work)
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
+ unsigned long flags;
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -485,6 +636,9 @@ static void cmd_work_handler(struct work_struct *work)
}
} else {
ent->idx = cmd->max_reg_cmds;
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+ clear_bit(ent->idx, &cmd->bitmask);
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
ent->token = alloc_token(cmd);
@@ -584,6 +738,16 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
return err;
}
+static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
+{
+ return &out->syndrome;
+}
+
+static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
+{
+ return &out->status;
+}
+
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchrous completion
@@ -1081,7 +1245,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
}
}
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -1092,7 +1256,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
s64 ds;
struct mlx5_cmd_stats *stats;
unsigned long flags;
+ unsigned long vector;
+ /* there can be at most 32 command queues */
+ vector = vec & 0xffffffff;
for (i = 0; i < (1 << cmd->log_sz); i++) {
if (test_bit(i, &vector)) {
struct semaphore *sem;
@@ -1110,11 +1277,16 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
ent->ret = verify_signature(ent);
else
ent->ret = 0;
- ent->status = ent->lay->status_own >> 1;
+ if (vec & MLX5_TRIGGERED_CMD_COMP)
+ ent->status = MLX5_DRIVER_STATUS_ABORTED;
+ else
+ ent->status = ent->lay->status_own >> 1;
+
mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
ent->ret, deliv_status_to_str(ent->status), ent->status);
}
free_ent(cmd, ent->idx);
+
if (ent->callback) {
ds = ent->ts2 - ent->ts1;
if (ent->op < ARRAY_SIZE(cmd->stats)) {
@@ -1136,6 +1308,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
+ err = err ? err : ent->status;
free_cmd(ent);
callback(err, context);
} else {
@@ -1183,6 +1356,11 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
return msg;
}
+static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
+{
+ return be16_to_cpu(in->opcode);
+}
+
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
@@ -1197,6 +1375,15 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
gfp_t gfp;
int err;
u8 status = 0;
+ u32 drv_synd;
+
+ if (pci_channel_offline(dev->pdev) ||
+ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
+ *get_synd_ptr(out) = cpu_to_be32(drv_synd);
+ *get_status_ptr(out) = status;
+ return err;
+ }
pages_queue = is_manage_pages(in);
gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
@@ -1363,6 +1550,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
int err;
int i;
+ memset(cmd, 0, sizeof(*cmd));
cmd_if_rev = cmdif_rev(dev);
if (cmd_if_rev != CMD_IF_REV) {
dev_err(&dev->pdev->dev,
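
One detail of the forced-completion path above is easy to miss: driver-triggered completions travel through the same u64 vector as real hardware completions, with the low 32 bits selecting command entries and a flag bit above them marking the completion as driver-generated, which is why the handler masks with 0xffffffff before scanning. The sketch below assumes the flag is bit 32; the real constant is MLX5_TRIGGERED_CMD_COMP from mlx5_core.h (illustrative userspace code):

/* Userspace sketch of the forced-completion vector; the flag bit position
 * is an assumption made for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define TRIGGERED_CMD_COMP (1ULL << 32)         /* assumed position of the flag */

int main(void)
{
        uint64_t vec = 0x5ULL | TRIGGERED_CMD_COMP;     /* entries 0 and 2, forced */
        unsigned long vector = vec & 0xffffffff;        /* at most 32 command entries */
        int i;

        for (i = 0; i < 32; i++)
                if (vector & (1UL << i))
                        printf("completing entry %d, status %s\n", i,
                               (vec & TRIGGERED_CMD_COMP) ?
                               "MLX5_DRIVER_STATUS_ABORTED" : "taken from hardware");
        return 0;
}
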
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 04ab7e445eae..b51e42d6fbec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -242,6 +242,7 @@ int mlx5_init_cq_table(struct mlx5_core_dev *dev)
struct mlx5_cq_table *table = &dev->priv.cq_table;
int err;
+ memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
err = mlx5_cq_debugfs_init(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0983a208b299..f2ae62dd8c09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -617,5 +617,11 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
}
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+ return min_t(int, mdev->priv.eq_table.num_comp_vectors,
+ MLX5E_MAX_NUM_CHANNELS);
+}
+
extern const struct ethtool_ops mlx5e_ethtool_ops;
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index bce912688ca8..2e022e900939 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -345,9 +345,8 @@ static void mlx5e_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
- ch->max_combined = ncv;
+ ch->max_combined = mlx5e_get_max_num_channels(priv->mdev);
ch->combined_count = priv->params.num_channels;
}
@@ -355,7 +354,7 @@ static int mlx5e_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+ int ncv = mlx5e_get_max_num_channels(priv->mdev);
unsigned int count = ch->combined_count;
bool was_opened;
int err = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 59874d666cff..5fc4d2d78cdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -442,12 +442,12 @@ static void mlx5e_disable_rq(struct mlx5e_rq *rq)
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
struct mlx5e_channel *c = rq->channel;
struct mlx5e_priv *priv = c->priv;
struct mlx5_wq_ll *wq = &rq->wq;
- int i;
- for (i = 0; i < 1000; i++) {
+ while (time_before(jiffies, exp_time)) {
if (wq->cur_sz >= priv->params.min_rx_wqes)
return 0;
@@ -1367,13 +1367,13 @@ int mlx5e_open_locked(struct net_device *netdev)
err = mlx5e_set_dev_port_mtu(netdev);
if (err)
- return err;
+ goto err_clear_state_opened_flag;
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
__func__, err);
- return err;
+ goto err_clear_state_opened_flag;
}
mlx5e_update_carrier(priv);
@@ -1382,6 +1382,10 @@ int mlx5e_open_locked(struct net_device *netdev)
schedule_delayed_work(&priv->update_stats_work, 0);
return 0;
+
+err_clear_state_opened_flag:
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ return err;
}
static int mlx5e_open(struct net_device *netdev)
@@ -1400,6 +1404,12 @@ int mlx5e_close_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ /* May already be CLOSED in case a previous configuration operation
+ * (e.g RX/TX queue size change) that involves close&open failed.
+ */
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_redirect_rqts(priv);
@@ -1833,7 +1843,7 @@ static int mlx5e_set_features(struct net_device *netdev,
mlx5e_disable_vlan_filter(priv);
}
- return 0;
+ return err;
}
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
@@ -1994,6 +2004,7 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -2037,8 +2048,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
struct mlx5e_priv *priv;
- int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
- MLX5E_MAX_NUM_CHANNELS);
+ int nch = mlx5e_get_max_num_channels(mdev);
int err;
if (mlx5e_check_required_hca_cap(mdev))
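
mlx5e_wait_for_min_rx_wqes() now bounds the wait by wall-clock time (a 20 s jiffies deadline) instead of a fixed iteration count, so the timeout no longer depends on how long each poll happens to take. A self-contained userspace sketch of the same deadline-polling shape, with clock_gettime() standing in for jiffies and the timeout return value assumed to mirror the driver's (illustrative only):

/* Deadline-bounded polling, userspace sketch (illustrative). */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int rx_ring_filled(void)
{
        static int polls;

        return ++polls >= 3;            /* pretend the ring fills on the 3rd poll */
}

static int wait_for_condition(int (*ready)(void), long timeout_ms)
{
        long deadline = now_ms() + timeout_ms;

        while (now_ms() < deadline) {   /* like time_before(jiffies, exp_time) */
                if (ready())
                        return 0;
                usleep(20 * 1000);      /* like msleep() inside the driver loop */
        }
        return -1;                      /* the driver reports a timeout error here */
}

int main(void)
{
        printf("wait result: %d\n", wait_for_condition(rx_ring_filled, 20000));
        return 0;
}
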
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b73672f32e2c..cd8f85a251d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -116,7 +116,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
-#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+#define MLX5E_MIN_INLINE ETH_HLEN
if (bf && (skb_headlen(skb) <= sq->max_inline))
return skb_headlen(skb);
@@ -124,6 +124,21 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
return MLX5E_MIN_INLINE;
}
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
+{
+ struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
+ int cpy1_sz = 2 * ETH_ALEN;
+ int cpy2_sz = ihs - cpy1_sz;
+
+ skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
+ skb_pull_inline(skb, cpy1_sz);
+ vhdr->h_vlan_proto = skb->vlan_proto;
+ vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
+ skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
+ cpy2_sz);
+ skb_pull_inline(skb, cpy2_sz);
+}
+
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -175,8 +190,13 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
ETH_ZLEN);
}
- skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
- skb_pull_inline(skb, ihs);
+ if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
+ ihs += VLAN_HLEN;
+ } else {
+ skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+ skb_pull_inline(skb, ihs);
+ }
eseg->inline_hdr_sz = cpu_to_be16(ihs);
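
mlx5e_insert_vlan() builds the 802.1Q tag directly into the WQE inline header: copy the two MAC addresses, write the 4-byte TPID/TCI pair, then copy the remaining headers, growing the inline size by VLAN_HLEN. A self-contained userspace sketch of the same byte layout (the buffer and values below are illustrative):

/* Userspace sketch of in-place 802.1Q tag insertion (illustrative). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_ALEN 6
#define VLAN_HLEN 4

static size_t insert_vlan(uint8_t *dst, const uint8_t *frame, size_t hdr_len,
                          uint16_t tpid, uint16_t tci)
{
        size_t cpy1 = 2 * ETH_ALEN;     /* destination MAC + source MAC */
        size_t cpy2 = hdr_len - cpy1;   /* ethertype + rest of the headers */
        uint16_t be;

        memcpy(dst, frame, cpy1);
        be = htons(tpid);
        memcpy(dst + cpy1, &be, 2);
        be = htons(tci);
        memcpy(dst + cpy1 + 2, &be, 2);
        memcpy(dst + cpy1 + VLAN_HLEN, frame + cpy1, cpy2);

        return hdr_len + VLAN_HLEN;     /* like ihs += VLAN_HLEN above */
}

int main(void)
{
        uint8_t frame[64] = { /* dst */ 1, 2, 3, 4, 5, 6,
                              /* src */ 7, 8, 9, 10, 11, 12,
                              /* type */ 0x08, 0x00 };
        uint8_t out[64 + VLAN_HLEN];
        size_t n = insert_vlan(out, frame, 14, 0x8100, 100);

        printf("inline header grew from 14 to %zu bytes\n", n);
        return 0;
}
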
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index a40b96d4c662..713ead583347 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -346,6 +346,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int inlen;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
+ eq->cons_index = 0;
err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
@@ -381,10 +382,10 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
name, pci_name(dev->pdev));
eq->eqn = out.eq_number;
- eq->irqn = vecidx;
+ eq->irqn = priv->msix_arr[vecidx].vector;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+ err = request_irq(eq->irqn, mlx5_msix_handler, 0,
priv->irq_info[vecidx].name, eq);
if (err)
goto err_eq;
@@ -420,12 +421,12 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
int err;
mlx5_debug_eq_remove(dev, eq);
- free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
+ free_irq(eq->irqn, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
+ synchronize_irq(eq->irqn);
mlx5_buf_free(dev, &eq->buf);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 292d76f2a904..f5deb642d0d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
+#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
@@ -46,39 +47,113 @@ enum {
enum {
MLX5_HEALTH_SYNDR_FW_ERR = 0x1,
MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7,
+ MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8,
MLX5_HEALTH_SYNDR_CRC_ERR = 0x9,
MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa,
MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb,
MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc,
MLX5_HEALTH_SYNDR_EQ_ERR = 0xd,
+ MLX5_HEALTH_SYNDR_EQ_INV = 0xe,
MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf,
+ MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10
};
-static DEFINE_SPINLOCK(health_lock);
-static LIST_HEAD(health_list);
-static struct work_struct health_work;
+enum {
+ MLX5_NIC_IFC_FULL = 0,
+ MLX5_NIC_IFC_DISABLED = 1,
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2
+};
-static void health_care(struct work_struct *work)
+static u8 get_nic_interface(struct mlx5_core_dev *dev)
{
- struct mlx5_core_health *health, *n;
- struct mlx5_core_dev *dev;
- struct mlx5_priv *priv;
- LIST_HEAD(tlist);
+ return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
+}
+
+static void trigger_cmd_completions(struct mlx5_core_dev *dev)
+{
+ unsigned long flags;
+ u64 vector;
- spin_lock_irq(&health_lock);
- list_splice_init(&health_list, &tlist);
+ /* wait for pending handlers to complete */
+ synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
+ spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+ vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+ if (!vector)
+ goto no_trig;
+
+ vector |= MLX5_TRIGGERED_CMD_COMP;
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+ mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+ mlx5_cmd_comp_handler(dev, vector);
+ return;
+
+no_trig:
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
+
+static int in_fatal(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ struct health_buffer __iomem *h = health->health;
- spin_unlock_irq(&health_lock);
+ if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+ return 1;
- list_for_each_entry_safe(health, n, &tlist, list) {
- priv = container_of(health, struct mlx5_priv, health);
- dev = container_of(priv, struct mlx5_core_dev, priv);
- mlx5_core_warn(dev, "handling bad device here\n");
- /* nothing yet */
- spin_lock_irq(&health_lock);
- list_del_init(&health->list);
- spin_unlock_irq(&health_lock);
+ if (ioread32be(&h->fw_ver) == 0xffffffff)
+ return 1;
+
+ return 0;
+}
+
+void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+{
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return;
+
+ mlx5_core_err(dev, "start\n");
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+
+ mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+ mlx5_core_err(dev, "end\n");
+}
+
+static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
+{
+ u8 nic_interface = get_nic_interface(dev);
+
+ switch (nic_interface) {
+ case MLX5_NIC_IFC_FULL:
+ mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
+ break;
+
+ case MLX5_NIC_IFC_DISABLED:
+ mlx5_core_warn(dev, "starting teardown\n");
+ break;
+
+ case MLX5_NIC_IFC_NO_DRAM_NIC:
+ mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
+ break;
+ default:
+ mlx5_core_warn(dev, "Expected to see disabled NIC but it has an invalid value %d\n",
+ nic_interface);
}
+
+ mlx5_disable_device(dev);
+}
+
+static void health_care(struct work_struct *work)
+{
+ struct mlx5_core_health *health;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+
+ health = container_of(work, struct mlx5_core_health, work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+ mlx5_core_warn(dev, "handling bad device here\n");
+ mlx5_handle_bad_state(dev);
}
static const char *hsynd_str(u8 synd)
@@ -88,6 +163,8 @@ static const char *hsynd_str(u8 synd)
return "firmware internal error";
case MLX5_HEALTH_SYNDR_IRISC_ERR:
return "irisc not responding";
+ case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
+ return "unrecoverable hardware error";
case MLX5_HEALTH_SYNDR_CRC_ERR:
return "firmware CRC error";
case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
@@ -98,48 +175,81 @@ static const char *hsynd_str(u8 synd)
return "async EQ buffer overrun";
case MLX5_HEALTH_SYNDR_EQ_ERR:
return "EQ error";
+ case MLX5_HEALTH_SYNDR_EQ_INV:
+ return "Invalid EQ refrenced";
case MLX5_HEALTH_SYNDR_FFSER_ERR:
return "FFSER error";
+ case MLX5_HEALTH_SYNDR_HIGH_TEMP:
+ return "High temprature";
default:
return "unrecognized error";
}
}
-static u16 read_be16(__be16 __iomem *p)
+static u16 get_maj(u32 fw)
{
- return swab16(readl((__force u16 __iomem *) p));
+ return fw >> 28;
}
-static u32 read_be32(__be32 __iomem *p)
+static u16 get_min(u32 fw)
{
- return swab32(readl((__force u32 __iomem *) p));
+ return fw >> 16 & 0xfff;
+}
+
+static u16 get_sub(u32 fw)
+{
+ return fw & 0xffff;
}
static void print_health_info(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
+ char fw_str[18];
+ u32 fw;
int i;
+ /* If the syndrom is 0, the device is OK and no need to print buffer */
+ if (!ioread8(&h->synd))
+ return;
+
for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
- pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i));
+ dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
+
+ dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
+ dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+ fw = ioread32be(&h->fw_ver);
+ sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+ dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
+ dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+ dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+ dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
+ dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+}
+
+static unsigned long get_next_poll_jiffies(void)
+{
+ unsigned long next;
- pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr));
- pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra));
- pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver));
- pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id));
- pr_info("irisc_index %d\n", readb(&h->irisc_index));
- pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd)));
- pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_sync));
+ get_random_bytes(&next, sizeof(next));
+ next %= HZ;
+ next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+
+ return next;
}
static void poll_health(unsigned long data)
{
struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long next;
u32 count;
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ trigger_cmd_completions(dev);
+ mod_timer(&health->timer, get_next_poll_jiffies());
+ return;
+ }
+
count = ioread32be(health->health_counter);
if (count == health->prev)
++health->miss_counter;
@@ -148,18 +258,16 @@ static void poll_health(unsigned long data)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
- mlx5_core_err(dev, "device's health compromised\n");
+ dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
print_health_info(dev);
- spin_lock_irq(&health_lock);
- list_add_tail(&health->list, &health_list);
- spin_unlock_irq(&health_lock);
-
- queue_work(mlx5_core_wq, &health_work);
} else {
- get_random_bytes(&next, sizeof(next));
- next %= HZ;
- next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
- mod_timer(&health->timer, next);
+ mod_timer(&health->timer, get_next_poll_jiffies());
+ }
+
+ if (in_fatal(dev) && !health->sick) {
+ health->sick = true;
+ print_health_info(dev);
+ queue_work(health->wq, &health->work);
}
}
@@ -167,7 +275,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- INIT_LIST_HEAD(&health->list);
init_timer(&health->timer);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -183,18 +290,33 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
del_timer_sync(&health->timer);
-
- spin_lock_irq(&health_lock);
- if (!list_empty(&health->list))
- list_del_init(&health->list);
- spin_unlock_irq(&health_lock);
}
-void mlx5_health_cleanup(void)
+void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ destroy_workqueue(health->wq);
}
-void __init mlx5_health_init(void)
+int mlx5_health_init(struct mlx5_core_dev *dev)
{
- INIT_WORK(&health_work, health_care);
+ struct mlx5_core_health *health;
+ char *name;
+
+ health = &dev->priv.health;
+ name = kmalloc(64, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strcpy(name, "mlx5_health");
+ strcat(name, dev_name(&dev->pdev->dev));
+ health->wq = create_singlethread_workqueue(name);
+ kfree(name);
+ if (!health->wq)
+ return -ENOMEM;
+
+ INIT_WORK(&health->work, health_care);
+
+ return 0;
}
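
trigger_cmd_completions() above derives the set of command entries still owned by callers from the allocation bitmask: set bits in cmd->bitmask mean free entries, so the complement, limited to the queue size, is the vector of completions to force. A small userspace sketch of that computation (values below are illustrative):

/* Userspace sketch: outstanding-command vector from the allocation bitmask. */
#include <stdio.h>

int main(void)
{
        unsigned int log_sz = 3;        /* 8 command entries */
        unsigned long bitmask = 0xE5;   /* set bits = free entries */
        unsigned long busy;
        int i;

        /* entries still owned by callers: complement, limited to queue size */
        busy = ~bitmask & ((1ul << (1 << log_sz)) - 1);

        printf("forcing completion for entries:");
        for (i = 0; i < (1 << log_sz); i++)
                if (busy & (1ul << i))
                        printf(" %d", i);
        printf("\n");
        return 0;
}
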
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 03aabdd79abe..2388aec208fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -39,12 +39,14 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
+#include <linux/delay.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
@@ -62,7 +64,6 @@ static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
-struct workqueue_struct *mlx5_core_wq;
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
@@ -152,6 +153,25 @@ static struct mlx5_profile profile[] = {
},
};
+#define FW_INIT_TIMEOUT_MILI 2000
+#define FW_INIT_WAIT_MS 2
+
+static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
+ int err = 0;
+
+ while (fw_initializing(dev)) {
+ if (time_after(jiffies, end)) {
+ err = -EBUSY;
+ break;
+ }
+ msleep(FW_INIT_WAIT_MS);
+ }
+
+ return err;
+}
+
static int set_dma_caps(struct pci_dev *pdev)
{
int err;
@@ -182,6 +202,34 @@ static int set_dma_caps(struct pci_dev *pdev)
return err;
}
+static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err = 0;
+
+ mutex_lock(&dev->pci_status_mutex);
+ if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
+ err = pci_enable_device(pdev);
+ if (!err)
+ dev->pci_status = MLX5_PCI_STATUS_ENABLED;
+ }
+ mutex_unlock(&dev->pci_status_mutex);
+
+ return err;
+}
+
+static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ mutex_lock(&dev->pci_status_mutex);
+ if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
+ pci_disable_device(pdev);
+ dev->pci_status = MLX5_PCI_STATUS_DISABLED;
+ }
+ mutex_unlock(&dev->pci_status_mutex);
+}
+
static int request_bar(struct pci_dev *pdev)
{
int err = 0;
@@ -672,12 +720,126 @@ static void unmap_bf_area(struct mlx5_core_dev *dev)
io_mapping_free(dev->priv.bf_mapping);
}
-static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
+static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
+ if (!dev_ctx)
+ return;
+
+ dev_ctx->intf = intf;
+ dev_ctx->context = intf->add(dev);
+
+ if (dev_ctx->context) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_add_tail(&dev_ctx->list, &priv->ctx_list);
+ spin_unlock_irq(&priv->ctx_lock);
+ } else {
+ kfree(dev_ctx);
+ }
+}
+
+static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf == intf) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_del(&dev_ctx->list);
+ spin_unlock_irq(&priv->ctx_lock);
+
+ intf->remove(dev, dev_ctx->context);
+ kfree(dev_ctx);
+ return;
+ }
+}
+
+static int mlx5_register_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- int err;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&intf_mutex);
+ list_add_tail(&priv->dev_list, &dev_list);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+
+static void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_remove_device(intf, priv);
+ list_del(&priv->dev_list);
+ mutex_unlock(&intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ if (!intf->add || !intf->remove)
+ return -EINVAL;
+
+ mutex_lock(&intf_mutex);
+ list_add_tail(&intf->list, &intf_list);
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ mutex_lock(&intf_mutex);
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx5_remove_device(intf, priv);
+ list_del(&intf->list);
+ mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+ if ((dev_ctx->intf->protocol == protocol) &&
+ dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev_ctx->context);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
+static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err = 0;
- dev->pdev = pdev;
pci_set_drvdata(dev->pdev, dev);
strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
@@ -694,7 +856,7 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
if (!priv->dbg_root)
return -ENOMEM;
- err = pci_enable_device(pdev);
+ err = mlx5_pci_enable_device(dev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
goto err_dbg;
@@ -721,13 +883,61 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
goto err_clr_master;
}
+
+ return 0;
+
+err_clr_master:
+ pci_clear_master(dev->pdev);
+ release_bar(dev->pdev);
+err_disable:
+ mlx5_pci_disable_device(dev);
+
+err_dbg:
+ debugfs_remove(priv->dbg_root);
+ return err;
+}
+
+static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ iounmap(dev->iseg);
+ pci_clear_master(dev->pdev);
+ release_bar(dev->pdev);
+ mlx5_pci_disable_device(dev);
+ debugfs_remove(priv->dbg_root);
+}
+
+#define MLX5_IB_MOD "mlx5_ib"
+static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err;
+
+ mutex_lock(&dev->intf_state_mutex);
+ if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+ dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
+ __func__);
+ goto out;
+ }
+
dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
fw_rev_min(dev), fw_rev_sub(dev));
+ /* on load removing any previous indication of internal error, device is
+ * up
+ */
+ dev->state = MLX5_DEVICE_STATE_UP;
+
err = mlx5_cmd_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
- goto err_unmap;
+ goto out_err;
+ }
+
+ err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
+ if (err) {
+ dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
+ FW_INIT_TIMEOUT_MILI);
+ goto out_err;
}
mlx5_pagealloc_init(dev);
@@ -842,8 +1052,29 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_init_srq_table(dev);
mlx5_init_mr_table(dev);
+ err = mlx5_register_device(dev);
+ if (err) {
+ dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+ goto err_reg_dev;
+ }
+
+ err = request_module_nowait(MLX5_IB_MOD);
+ if (err)
+ pr_info("failed request module on %s\n", MLX5_IB_MOD);
+
+ dev->interface_state = MLX5_INTERFACE_STATE_UP;
+out:
+ mutex_unlock(&dev->intf_state_mutex);
+
return 0;
+err_reg_dev:
+ mlx5_cleanup_mr_table(dev);
+ mlx5_cleanup_srq_table(dev);
+ mlx5_cleanup_qp_table(dev);
+ mlx5_cleanup_cq_table(dev);
+ mlx5_irq_clear_affinity_hints(dev);
+
err_unmap_bf_area:
unmap_bf_area(dev);
@@ -865,7 +1096,7 @@ err_stop_poll:
mlx5_stop_health_poll(dev);
if (mlx5_cmd_teardown_hca(dev)) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
- return err;
+ goto out_err;
}
err_pagealloc_stop:
@@ -881,25 +1112,25 @@ err_pagealloc_cleanup:
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
-err_unmap:
- iounmap(dev->iseg);
-
-err_clr_master:
- pci_clear_master(dev->pdev);
- release_bar(dev->pdev);
-
-err_disable:
- pci_disable_device(dev->pdev);
+out_err:
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ mutex_unlock(&dev->intf_state_mutex);
-err_dbg:
- debugfs_remove(priv->dbg_root);
return err;
}
-static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
- struct mlx5_priv *priv = &dev->priv;
+ int err = 0;
+ mutex_lock(&dev->intf_state_mutex);
+ if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+ dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
+ __func__);
+ goto out;
+ }
+ mlx5_unregister_device(dev);
+ mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
@@ -911,139 +1142,25 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_eq_cleanup(dev);
mlx5_disable_msix(dev);
mlx5_stop_health_poll(dev);
- if (mlx5_cmd_teardown_hca(dev)) {
+ err = mlx5_cmd_teardown_hca(dev);
+ if (err) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
- return;
+ goto out;
}
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
- iounmap(dev->iseg);
- pci_clear_master(dev->pdev);
- release_bar(dev->pdev);
- pci_disable_device(dev->pdev);
- debugfs_remove(priv->dbg_root);
-}
-
-static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
- if (!dev_ctx) {
- pr_warn("mlx5_add_device: alloc context failed\n");
- return;
- }
-
- dev_ctx->intf = intf;
- dev_ctx->context = intf->add(dev);
-
- if (dev_ctx->context) {
- spin_lock_irq(&priv->ctx_lock);
- list_add_tail(&dev_ctx->list, &priv->ctx_list);
- spin_unlock_irq(&priv->ctx_lock);
- } else {
- kfree(dev_ctx);
- }
-}
-
-static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf == intf) {
- spin_lock_irq(&priv->ctx_lock);
- list_del(&dev_ctx->list);
- spin_unlock_irq(&priv->ctx_lock);
-
- intf->remove(dev, dev_ctx->context);
- kfree(dev_ctx);
- return;
- }
-}
-static int mlx5_register_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&intf_mutex);
- list_add_tail(&priv->dev_list, &dev_list);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&intf_mutex);
-
- return 0;
-}
-static void mlx5_unregister_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_remove_device(intf, priv);
- list_del(&priv->dev_list);
- mutex_unlock(&intf_mutex);
-}
-
-int mlx5_register_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- if (!intf->add || !intf->remove)
- return -EINVAL;
-
- mutex_lock(&intf_mutex);
- list_add_tail(&intf->list, &intf_list);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&intf_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_register_interface);
-
-void mlx5_unregister_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- mutex_lock(&intf_mutex);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx5_remove_device(intf, priv);
- list_del(&intf->list);
- mutex_unlock(&intf_mutex);
-}
-EXPORT_SYMBOL(mlx5_unregister_interface);
-
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
- struct mlx5_priv *priv = &mdev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
- void *result = NULL;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
- if ((dev_ctx->intf->protocol == protocol) &&
- dev_ctx->intf->get_dev) {
- result = dev_ctx->intf->get_dev(dev_ctx->context);
- break;
- }
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
- return result;
+out:
+ dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+ mutex_unlock(&dev->intf_state_mutex);
+ return err;
}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
-static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- unsigned long param)
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ unsigned long param)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_device_context *dev_ctx;
@@ -1064,7 +1181,6 @@ struct mlx5_core_event_handler {
void *data);
};
-#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -1088,43 +1204,166 @@ static int init_one(struct pci_dev *pdev,
prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profile[prof_sel];
+ dev->pdev = pdev;
dev->event = mlx5_core_event;
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
- err = mlx5_dev_init(dev, pdev);
+ mutex_init(&dev->pci_status_mutex);
+ mutex_init(&dev->intf_state_mutex);
+ err = mlx5_pci_init(dev, priv);
if (err) {
- dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
- goto out;
+ dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
+ goto clean_dev;
}
- err = mlx5_register_device(dev);
+ err = mlx5_health_init(dev);
if (err) {
- dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
- goto out_init;
+ dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
+ goto close_pci;
}
- err = request_module_nowait(MLX5_IB_MOD);
- if (err)
- pr_info("failed request module on %s\n", MLX5_IB_MOD);
+ err = mlx5_load_one(dev, priv);
+ if (err) {
+ dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
+ goto clean_health;
+ }
return 0;
-out_init:
- mlx5_dev_cleanup(dev);
-out:
+clean_health:
+ mlx5_health_cleanup(dev);
+close_pci:
+ mlx5_pci_close(dev, priv);
+clean_dev:
+ pci_set_drvdata(pdev, NULL);
kfree(dev);
+
return err;
}
+
static void remove_one(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
- mlx5_unregister_device(dev);
- mlx5_dev_cleanup(dev);
+ if (mlx5_unload_one(dev, priv)) {
+ dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
+ mlx5_health_cleanup(dev);
+ return;
+ }
+ mlx5_health_cleanup(dev);
+ mlx5_pci_close(dev, priv);
+ pci_set_drvdata(pdev, NULL);
kfree(dev);
}
+static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+ mlx5_enter_error_state(dev);
+ mlx5_unload_one(dev, priv);
+ mlx5_pci_disable_device(dev);
+ return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ int err = 0;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+
+ err = mlx5_pci_enable_device(dev);
+ if (err) {
+ dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
+ , __func__, err);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+/* wait for the device to show vital signs. For now we check
+ * that we can read the device ID and that the health buffer
+ * shows a non zero value which is different than 0xffffffff
+ */
+static void wait_vital(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_core_health *health = &dev->priv.health;
+ const int niter = 100;
+ u32 count;
+ u16 did;
+ int i;
+
+ /* Wait for firmware to be ready after reset */
+ msleep(1000);
+ for (i = 0; i < niter; i++) {
+ if (pci_read_config_word(pdev, 2, &did)) {
+ dev_warn(&pdev->dev, "failed reading config word\n");
+ break;
+ }
+ if (did == pdev->device) {
+ dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
+ break;
+ }
+ msleep(50);
+ }
+ if (i == niter)
+ dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+
+ for (i = 0; i < niter; i++) {
+ count = ioread32be(health->health_counter);
+ if (count && count != 0xffffffff) {
+ dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ break;
+ }
+ msleep(50);
+ }
+
+ if (i == niter)
+ dev_warn(&pdev->dev, "%s-%d: could not read health counter\n", __func__, __LINE__);
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
+ int err;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+
+ pci_save_state(pdev);
+ wait_vital(pdev);
+
+ err = mlx5_load_one(dev, priv);
+ if (err)
+ dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
+ , __func__, err);
+ else
+ dev_info(&pdev->dev, "%s: device recovered\n", __func__);
+}
+
+static const struct pci_error_handlers mlx5_err_handler = {
+ .error_detected = mlx5_pci_err_detected,
+ .slot_reset = mlx5_pci_slot_reset,
+ .resume = mlx5_pci_resume
+};
+
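For orientation only (not part of the patch): the two helpers added above, mlx5_enter_error_state() and mlx5_disable_device(), give the rest of the driver the same teardown path the AER callbacks use. A minimal, hypothetical sketch of such a caller:

/* Hypothetical illustration, not from this series: reuse the AER-style
 * teardown when the firmware is found dead outside of a PCI error event.
 */
static void mlx5_handle_fatal_error(struct mlx5_core_dev *dev)
{
	mlx5_enter_error_state(dev);	/* mark MLX5_DEVICE_STATE_INTERNAL_ERROR */
	mlx5_disable_device(dev);	/* unload and disable PCI, as in err_detected */
}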
static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
{ PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
@@ -1141,7 +1380,8 @@ static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.id_table = mlx5_core_pci_table,
.probe = init_one,
- .remove = remove_one
+ .remove = remove_one,
+ .err_handler = &mlx5_err_handler
};
static int __init init(void)
@@ -1149,16 +1389,10 @@ static int __init init(void)
int err;
mlx5_register_debugfs();
- mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
- if (!mlx5_core_wq) {
- err = -ENOMEM;
- goto err_debug;
- }
- mlx5_health_init();
err = pci_register_driver(&mlx5_core_driver);
if (err)
- goto err_health;
+ goto err_debug;
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_init();
@@ -1166,9 +1400,6 @@ static int __init init(void)
return 0;
-err_health:
- mlx5_health_cleanup();
- destroy_workqueue(mlx5_core_wq);
err_debug:
mlx5_unregister_debugfs();
return err;
@@ -1180,8 +1411,6 @@ static void __exit cleanup(void)
mlx5e_cleanup();
#endif
pci_unregister_driver(&mlx5_core_driver);
- mlx5_health_cleanup();
- destroy_workqueue(mlx5_core_wq);
mlx5_unregister_debugfs();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 566a70488db1..cee5b7a839bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -43,25 +43,25 @@
extern int mlx5_core_debug_mask;
-#define mlx5_core_dbg(dev, format, ...) \
- pr_debug("%s:%s:%d:(pid %d): " format, \
- (dev)->priv.name, __func__, __LINE__, current->pid, \
+#define mlx5_core_dbg(__dev, format, ...) \
+ dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
+ (__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_dbg_mask(dev, mask, format, ...) \
+#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
do { \
if ((mask) & mlx5_core_debug_mask) \
- mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
+ mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
} while (0)
-#define mlx5_core_err(dev, format, ...) \
- pr_err("%s:%s:%d:(pid %d): " format, \
- (dev)->priv.name, __func__, __LINE__, current->pid, \
+#define mlx5_core_err(__dev, format, ...) \
+ dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
+ (__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_warn(dev, format, ...) \
- pr_warn("%s:%s:%d:(pid %d): " format, \
- (dev)->priv.name, __func__, __LINE__, current->pid, \
+#define mlx5_core_warn(__dev, format, ...) \
+ dev_warn(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
+ (__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
enum {
@@ -86,6 +86,10 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ unsigned long param);
+void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 1adb300dd850..6fa22b51e460 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -40,6 +40,7 @@ void mlx5_init_mr_table(struct mlx5_core_dev *dev)
{
struct mlx5_mr_table *table = &dev->priv.mr_table;
+ memset(table, 0, sizeof(*table));
rwlock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 8a64542abc16..1cda5d268ec9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -275,12 +275,36 @@ out_alloc:
return err;
}
+
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+{
+ struct mlx5_manage_pages_inbox *in;
+ struct mlx5_manage_pages_outbox out;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return;
+
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
+ in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
+ in->func_id = cpu_to_be16(func_id);
+ err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ if (!err)
+ err = mlx5_cmd_status_to_err(&out.hdr);
+
+ if (err)
+ mlx5_core_warn(dev, "page notify failed\n");
+
+ kfree(in);
+}
+
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
struct mlx5_manage_pages_inbox *in;
struct mlx5_manage_pages_outbox out;
- struct mlx5_manage_pages_inbox *nin;
int inlen;
u64 addr;
int err;
@@ -289,8 +313,9 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
in = mlx5_vzalloc(inlen);
if (!in) {
+ err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
- return -ENOMEM;
+ goto out_free;
}
memset(&out, 0, sizeof(out));
@@ -316,43 +341,29 @@ retry:
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
- goto out_alloc;
+ goto out_4k;
}
dev->priv.fw_pages += npages;
- if (out.hdr.status) {
- err = mlx5_cmd_status_to_err(&out.hdr);
- if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
- func_id, npages, out.hdr.status);
- goto out_alloc;
- }
+ err = mlx5_cmd_status_to_err(&out.hdr);
+ if (err) {
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+ func_id, npages, out.hdr.status);
+ goto out_4k;
}
mlx5_core_dbg(dev, "err %d\n", err);
- goto out_free;
-
-out_alloc:
- if (notify_fail) {
- nin = kzalloc(sizeof(*nin), GFP_KERNEL);
- if (!nin) {
- mlx5_core_warn(dev, "allocation failed\n");
- goto out_4k;
- }
- memset(&out, 0, sizeof(out));
- nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
- if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
- mlx5_core_warn(dev, "page notify failed\n");
- kfree(nin);
- }
+ kvfree(in);
+ return 0;
out_4k:
for (i--; i >= 0; i--)
free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
kvfree(in);
+ if (notify_fail)
+ page_notify_fail(dev, func_id);
return err;
}
@@ -482,15 +493,20 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
struct fw_page *fwp;
struct rb_node *p;
int nclaimed = 0;
- int err;
+ int err = 0;
do {
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- err = reclaim_pages(dev, fwp->func_id,
- optimal_reclaimed_pages(),
- &nclaimed);
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ free_4k(dev, fwp->addr);
+ nclaimed = 1;
+ } else {
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
+ }
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 3b9480fa3403..a87e773e93f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -90,16 +90,13 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
{
struct mlx5_reg_pcap in;
struct mlx5_reg_pcap out;
- int err;
memset(&in, 0, sizeof(in));
in.caps_127_96 = cpu_to_be32(caps);
in.port_num = port_num;
- err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
- sizeof(out), MLX5_REG_PCAP, 0, 1);
-
- return err;
+ return mlx5_core_access_reg(dev, &in, sizeof(in), &out,
+ sizeof(out), MLX5_REG_PCAP, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
@@ -107,16 +104,13 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port)
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, local_port);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
- err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
- ptys_size, MLX5_REG_PTYS, 0, 0);
-
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+ ptys_size, MLX5_REG_PTYS, 0, 0);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
@@ -199,7 +193,6 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
memset(in, 0, sizeof(in));
@@ -210,9 +203,8 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
else
MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PTYS, 0, 1);
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
@@ -250,7 +242,7 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
return err;
*status = MLX5_GET(paos_reg, out, admin_status);
- return err;
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
@@ -308,15 +300,12 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
{
u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
- int err;
memset(in, 0, sizeof(in));
MLX5_SET(pvlc_reg, in, local_port, local_port);
- err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
- pvlc_size, MLX5_REG_PVLC, 0, 0);
-
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+ pvlc_size, MLX5_REG_PVLC, 0, 0);
}
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
@@ -339,16 +328,14 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
- int err;
memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
MLX5_SET(pfcc_reg, in, pptx, tx_pause);
MLX5_SET(pfcc_reg, in, pprx, rx_pause);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PFCC, 0, 1);
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 8b494b562263..30e2ba3f5f16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -345,6 +345,7 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
+ memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
mlx5_qp_debugfs_init(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index c48f504ccbeb..ffada801976b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -531,6 +531,7 @@ void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
+ memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index b4c87c7b0cf0..d7068f54e800 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -177,7 +177,7 @@ int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
memset(in, 0, sizeof(in));
@@ -206,7 +206,7 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_out)];
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
memset(in, 0, sizeof(in));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 2941d9c5ae48..e36e12219c9b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -30,3 +30,14 @@ config MLXSW_SWITCHX2
To compile this driver as a module, choose M here: the
module will be called mlxsw_switchx2.
+
+config MLXSW_SPECTRUM
+ tristate "Mellanox Technologies Spectrum support"
+ depends on MLXSW_CORE && NET_SWITCHDEV
+ default m
+ ---help---
+ This driver supports Mellanox Technologies Spectrum Ethernet
+ Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_spectrum.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 0a05f65ee814..af015818fd19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -4,3 +4,6 @@ obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
mlxsw_pci-objs := pci.o
obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
mlxsw_switchx2-objs := switchx2.o
+obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
+mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
+ spectrum_switchdev.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 770db17eb03f..cd63b8263688 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -464,6 +464,8 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
* passed in this command must be pinned.
*/
+#define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32
+
static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
char *in_mbox, u32 vpm_entries_count)
{
@@ -568,7 +570,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
-/* cmd_mbox_config_profile_set_fid_based
+/* cmd_mbox_config_profile_set_flood_mode
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
@@ -649,12 +651,8 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
/* cmd_mbox_config_profile_max_flood_tables
- * Maximum number of Flooding Tables. Flooding Tables are associated to
- * the different packet types for the different switch partitions.
- * Note that the table size depends on the fid_based mode.
- * In SwitchX silicon, tables are split equally between the switch
- * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
- * with swid-1 and the last 4 are associated with swid-2.
+ * Maximum number of single-entry flooding tables. Different flooding tables
+ * can be associated with different packet types.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
@@ -665,15 +663,42 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
-/* cmd_mbox_config_profile_fid_based
- * FID Based Flood Mode
- * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
- * 01 Use FID to offset the index to the Port Group Table (pgi)
- * 10 Use FID to offset the index to the Port Group Table (pgi) and
- * the Multicast ID
+/* cmd_mbox_config_profile_flood_mode
+ * Flooding mode to use.
+ * 0-2 - Backward compatible modes for SwitchX devices.
+ * 3 - Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset tables.
+ * max_fid_flood_tables indicates the number of per-FID tables.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+/* cmd_mbox_config_profile_max_fid_offset_flood_tables
+ * Maximum number of FID-offset flooding tables.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ max_fid_offset_flood_tables, 0x34, 24, 4);
+
+/* cmd_mbox_config_profile_fid_offset_flood_table_size
+ * The size (number of entries) of each FID-offset flood table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ fid_offset_flood_table_size, 0x34, 0, 16);
+
+/* cmd_mbox_config_profile_max_fid_flood_tables
+ * Maximum number of per-FID flooding tables.
+ *
+ * Note: These flooding tables cover special FIDs only (vFIDs), starting at
+ * FID value 4K and higher.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_fid_flood_tables, 0x38, 24, 4);
+
+/* cmd_mbox_config_profile_fid_flood_table_size
+ * The size (number of entries) of each per-FID table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, fid_flood_table_size, 0x38, 0, 16);
+
/* cmd_mbox_config_profile_max_ib_mc
* Maximum number of multicast FDB records for InfiniBand
* FDB (in 512 chunks) per InfiniBand switch partition.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 28c19cc1a17c..97f0d93caf99 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -287,7 +287,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
mlxsw_emad_op_tlv_status_set(op_tlv, 0);
mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
- if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
+ if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
mlxsw_emad_op_tlv_method_set(op_tlv,
MLXSW_EMAD_OP_TLV_METHOD_QUERY);
else
@@ -362,7 +362,7 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
char *op_tlv;
op_tlv = mlxsw_emad_op_tlv(skb);
- return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
+ return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
#define MLXSW_EMAD_TIMEOUT_MS 200
@@ -511,7 +511,6 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
return err;
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
- MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
MLXSW_TRAP_ID_ETHEMAD);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
@@ -556,8 +555,8 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ mlxsw_core->emad.use_emad = false;
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
- MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
MLXSW_TRAP_ID_ETHEMAD);
mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 165808471188..807827350a89 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -54,6 +54,7 @@
MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+#define MLXSW_DEVICE_KIND_SPECTRUM "spectrum"
struct mlxsw_core;
struct mlxsw_driver;
@@ -153,6 +154,10 @@ struct mlxsw_config_profile {
u8 max_flood_tables;
u8 max_vid_flood_tables;
u8 flood_mode;
+ u8 max_fid_offset_flood_tables;
+ u16 fid_offset_flood_table_size;
+ u8 max_fid_flood_tables;
+ u16 fid_flood_table_size;
u16 max_ib_mc;
u16 max_pkey;
u8 ar_sec;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 36fb1cec53c9..a94dbda6590b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -171,15 +171,21 @@ static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
}
static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
- struct mlxsw_item *item)
+ struct mlxsw_item *item,
+ unsigned short index)
{
- memcpy(dst, &buf[item->offset], item->size.bytes);
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+ memcpy(dst, &buf[offset], item->size.bytes);
}
-static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
- struct mlxsw_item *item)
+static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
+ struct mlxsw_item *item,
+ unsigned short index)
{
- memcpy(&buf[item->offset], src, item->size.bytes);
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+ memcpy(&buf[offset], src, item->size.bytes);
}
static inline u16
@@ -373,12 +379,40 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \
{ \
- __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+ __mlxsw_item_memcpy_from(buf, dst, \
+ &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
+{ \
+ __mlxsw_item_memcpy_to(buf, src, \
+ &__ITEM_NAME(_type, _cname, _iname), 0); \
+}
+
+#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \
+ _step, _instepoffset) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, \
+ unsigned short index, \
+ char *dst) \
+{ \
+ __mlxsw_item_memcpy_from(buf, dst, \
+ &__ITEM_NAME(_type, _cname, _iname), index); \
} \
static inline void \
-mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
+ unsigned short index, \
+ const char *src) \
{ \
- __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \
+ __mlxsw_item_memcpy_to(buf, src, \
+ &__ITEM_NAME(_type, _cname, _iname), index); \
}
#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
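As a reading aid (not part of the patch), here is roughly what the new indexed buffer variant expands to for the rec_mac item that reg.h defines further down; the only assumption is the ordinary pre-processor expansion of the macro above:

/* Approximate expansion of
 * MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6,
 *			   MLXSW_REG_SFD_REC_LEN, 0x02);
 * step/in_step_offset let __mlxsw_item_offset() locate record 'index'.
 */
static struct mlxsw_item __ITEM_NAME(reg, sfd, rec_mac) = {
	.offset = MLXSW_REG_SFD_BASE_LEN,
	.step = MLXSW_REG_SFD_REC_LEN,
	.in_step_offset = 0x02,
	.size = {.bytes = 6,},
	.name = "reg_sfd_rec_mac",
};

static inline void
mlxsw_reg_sfd_rec_mac_memcpy_from(char *buf, unsigned short index, char *dst)
{
	__mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(reg, sfd, rec_mac), index);
}

static inline void
mlxsw_reg_sfd_rec_mac_memcpy_to(char *buf, unsigned short index, const char *src)
{
	__mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(reg, sfd, rec_mac), index);
}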
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index cef866c37648..de69e719dc9d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -57,6 +57,7 @@ static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
static const struct pci_device_id mlxsw_pci_id_table[] = {
{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
{0, }
};
@@ -67,6 +68,8 @@ static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
switch (id->device) {
case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
return MLXSW_DEVICE_KIND_SWITCHX2;
+ case PCI_DEVICE_ID_MELLANOX_SPECTRUM:
+ return MLXSW_DEVICE_KIND_SPECTRUM;
default:
BUG();
}
@@ -171,8 +174,8 @@ struct mlxsw_pci {
struct msix_entry msix_entry;
struct mlxsw_core *core;
struct {
- u16 num_pages;
struct mlxsw_pci_mem_item *items;
+ unsigned int count;
} fw_area;
struct {
struct mlxsw_pci_mem_item out_mbox;
@@ -431,8 +434,7 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
- if (net_ratelimit())
- dev_err(&pdev->dev, "failed to dma map tx frag\n");
+ dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
return -EIO;
}
mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
@@ -497,6 +499,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
struct mlxsw_pci_queue_elem_info *elem_info;
+ u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
int i;
int err;
@@ -504,9 +507,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->consumer_counter = 0;
/* Set CQ of same number of this RDQ with base
- * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs.
+ * above SDQ count, as the lower ones are assigned to SDQs.
*/
- mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -699,8 +702,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
put_new_skb:
memset(wqe, 0, q->elem_size);
err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
- if (err && net_ratelimit())
- dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ if (err)
+ dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -830,7 +833,8 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
{
struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
struct mlxsw_pci *mlxsw_pci = q->pci;
- unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+ u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
+ unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
char *eqe;
u8 cqn;
bool cq_handle = false;
@@ -866,7 +870,7 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
if (!cq_handle)
return;
- for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+ for_each_set_bit(cqn, active_cqns, cq_count) {
q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
mlxsw_pci_queue_tasklet_schedule(q);
}
@@ -1067,10 +1071,8 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
- if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
- (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
- (num_cqs != MLXSW_PCI_CQS_COUNT) ||
- (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+ if (num_sdqs + num_rdqs > num_cqs ||
+ num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
dev_err(&pdev->dev, "Unsupported number of queues\n");
return -EINVAL;
}
@@ -1215,6 +1217,14 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mbox, profile->max_flood_tables);
mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
mbox, profile->max_vid_flood_tables);
+ mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
+ mbox, profile->max_fid_offset_flood_tables);
+ mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
+ mbox, profile->fid_offset_flood_table_size);
+ mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
+ mbox, profile->max_fid_flood_tables);
+ mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
+ mbox, profile->fid_flood_table_size);
}
if (profile->used_flood_mode) {
mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
@@ -1272,6 +1282,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
u16 num_pages)
{
struct mlxsw_pci_mem_item *mem_item;
+ int nent = 0;
int i;
int err;
@@ -1279,7 +1290,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
GFP_KERNEL);
if (!mlxsw_pci->fw_area.items)
return -ENOMEM;
- mlxsw_pci->fw_area.num_pages = num_pages;
+ mlxsw_pci->fw_area.count = num_pages;
mlxsw_cmd_mbox_zero(mbox);
for (i = 0; i < num_pages; i++) {
@@ -1293,13 +1304,22 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err = -ENOMEM;
goto err_alloc;
}
- mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
- mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+ mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
+ mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
+ if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+ if (err)
+ goto err_cmd_map_fa;
+ nent = 0;
+ mlxsw_cmd_mbox_zero(mbox);
+ }
}
- err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
- if (err)
- goto err_cmd_map_fa;
+ if (nent) {
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+ if (err)
+ goto err_cmd_map_fa;
+ }
return 0;
@@ -1322,7 +1342,7 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
mlxsw_cmd_unmap_fa(mlxsw_pci->core);
- for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+ for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
mem_item = &mlxsw_pci->fw_area.items[i];
pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
@@ -1642,8 +1662,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
CIR_OUT_PARAM_LO));
memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
}
- } else if (!err && out_mbox)
+ } else if (!err && out_mbox) {
memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+ }
mutex_unlock(&mlxsw_pci->cmd.lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 1ef9664b4512..142f33d978c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -40,6 +40,7 @@
#include "item.h"
#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */
#define MLXSW_PCI_PAGE_SIZE 4096
@@ -71,9 +72,7 @@
#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
((offset) + (type_offset) + (num) * 4)
-#define MLXSW_PCI_RDQS_COUNT 24
-#define MLXSW_PCI_SDQS_COUNT 24
-#define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_CQS_MAX 96
#define MLXSW_PCI_EQS_COUNT 2
#define MLXSW_PCI_EQ_ASYNC_NUM 0
#define MLXSW_PCI_EQ_COMP_NUM 1
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 096e1c12175a..236fb5d2ad69 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -99,57 +99,6 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = {
*/
MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
-/* SMID - Switch Multicast ID
- * --------------------------
- * In multi-chip configuration, each device should maintain mapping between
- * Multicast ID (MID) into a list of local ports. This mapping is used in all
- * the devices other than the ingress device, and is implemented as part of the
- * FDB. The MID record maps from a MID, which is a unique identi- fier of the
- * multicast group within the stacking domain, into a list of local ports into
- * which the packet is replicated.
- */
-#define MLXSW_REG_SMID_ID 0x2007
-#define MLXSW_REG_SMID_LEN 0x420
-
-static const struct mlxsw_reg_info mlxsw_reg_smid = {
- .id = MLXSW_REG_SMID_ID,
- .len = MLXSW_REG_SMID_LEN,
-};
-
-/* reg_smid_swid
- * Switch partition ID.
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
-
-/* reg_smid_mid
- * Multicast identifier - global identifier that represents the multicast group
- * across all devices
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
-
-/* reg_smid_port
- * Local port memebership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
-
-/* reg_smid_port_mask
- * Local port mask (1 bit per port).
- * Access: W
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
-
-static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
-{
- MLXSW_REG_ZERO(smid, payload);
- mlxsw_reg_smid_swid_set(payload, 0);
- mlxsw_reg_smid_mid_set(payload, mid);
- mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
- mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
-}
-
/* SSPR - Switch System Port Record Register
* -----------------------------------------
* Configures the system port to local port mapping.
@@ -208,11 +157,359 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
mlxsw_reg_sspr_system_port_set(payload, local_port);
}
+/* SFDAT - Switch Filtering Database Aging Time
+ * --------------------------------------------
+ * Controls the Switch aging time. Aging time can be set per Switch
+ * Partition.
+ */
+#define MLXSW_REG_SFDAT_ID 0x2009
+#define MLXSW_REG_SFDAT_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sfdat = {
+ .id = MLXSW_REG_SFDAT_ID,
+ .len = MLXSW_REG_SFDAT_LEN,
+};
+
+/* reg_sfdat_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8);
+
+/* reg_sfdat_age_time
+ * Aging time in seconds
+ * Min - 10 seconds
+ * Max - 1,000,000 seconds
+ * Default is 300 seconds.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20);
+
+static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time)
+{
+ MLXSW_REG_ZERO(sfdat, payload);
+ mlxsw_reg_sfdat_swid_set(payload, 0);
+ mlxsw_reg_sfdat_age_time_set(payload, age_time);
+}
+
+/* SFD - Switch Filtering Database
+ * -------------------------------
+ * The following register defines the access to the filtering database.
+ * The register supports querying, adding, removing and modifying the database.
+ * The access is optimized for bulk updates in which case more than one
+ * FDB record is present in the same command.
+ */
+#define MLXSW_REG_SFD_ID 0x200A
+#define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFD_REC_MAX_COUNT 64
+#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \
+ MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sfd = {
+ .id = MLXSW_REG_SFD_ID,
+ .len = MLXSW_REG_SFD_LEN,
+};
+
+/* reg_sfd_swid
+ * Switch partition ID for queries. Reserved on Write.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfd_op {
+ /* Dump entire FDB (process according to record_locator) */
+ MLXSW_REG_SFD_OP_QUERY_DUMP = 0,
+ /* Query records by {MAC, VID/FID} value */
+ MLXSW_REG_SFD_OP_QUERY_QUERY = 1,
+ /* Query and clear activity. Query records by {MAC, VID/FID} value */
+ MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2,
+ /* Test. Response indicates if each of the records could be
+ * added to the FDB.
+ */
+ MLXSW_REG_SFD_OP_WRITE_TEST = 0,
+ /* Add/modify. Aged-out records cannot be added. This command removes
+ * the learning notification of the {MAC, VID/FID}. Response includes
+ * the entries that were added to the FDB.
+ */
+ MLXSW_REG_SFD_OP_WRITE_EDIT = 1,
+ /* Remove record by {MAC, VID/FID}. This command also removes
+ * the learning notification and aged-out notifications
+ * of the {MAC, VID/FID}. The response provides current (pre-removal)
+ * entries as non-aged-out.
+ */
+ MLXSW_REG_SFD_OP_WRITE_REMOVE = 2,
+ /* Remove learned notification by {MAC, VID/FID}. The response provides
+ * the removed learning notification.
+ */
+ MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2,
+};
+
+/* reg_sfd_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2);
+
+/* reg_sfd_record_locator
+ * Used for querying the FDB. Use record_locator=0 to initiate the
+ * query. When a record is returned, a new record_locator is
+ * returned to be used in the subsequent query.
+ * Reserved for database update.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30);
+
+/* reg_sfd_num_rec
+ * Request: Number of records to read/add/modify/remove
+ * Response: Number of records read/added/replaced/removed
+ * See above description for more details.
+ * Ranges 0..64
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8);
+
+static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op,
+ u32 record_locator)
+{
+ MLXSW_REG_ZERO(sfd, payload);
+ mlxsw_reg_sfd_op_set(payload, op);
+ mlxsw_reg_sfd_record_locator_set(payload, record_locator);
+}
+
+/* reg_sfd_rec_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_type {
+ MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
+};
+
+/* reg_sfd_rec_type
+ * FDB record type.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_policy {
+ /* Replacement disabled, aging disabled. */
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0,
+ /* (mlag remote): Replacement enabled, aging disabled,
+ * learning notification enabled on this port.
+ */
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1,
+ /* (ingress device): Replacement enabled, aging enabled. */
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3,
+};
+
+/* reg_sfd_rec_policy
+ * Policy.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_a
+ * Activity. Set for new static entries. Set for static entries if a frame SMAC
+ * lookup hits on the entry.
+ * To clear the a bit, use "query and clear activity" op.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_mac
+ * MAC address.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6,
+ MLXSW_REG_SFD_REC_LEN, 0x02);
+
+enum mlxsw_reg_sfd_rec_action {
+ /* forward */
+ MLXSW_REG_SFD_REC_ACTION_NOP = 0,
+ /* forward and trap, trap_id is FDB_TRAP */
+ MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
+ /* trap and do not forward, trap_id is FDB_TRAP */
+ MLXSW_REG_SFD_REC_ACTION_TRAP = 3,
+ MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
+};
+
+/* reg_sfd_rec_action
+ * Action to apply on the packet.
+ * Note: Dynamic entries can only be configured with NOP action.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_sub_port
+ * VEPA channel on local port.
+ * Valid only if local port is a non-stacking port. Must be 0 if multichannel
+ * VEPA is not enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_fid_vid
+ * Filtering ID or VLAN ID
+ * For SwitchX and SwitchX-2:
+ * - Dynamic entries (policy 2,3) use FID
+ * - Static entries (policy 0) use VID
+ * - When independent learning is configured, VID=FID
+ * For Spectrum: use FID for both Dynamic and Static entries.
+ * VID should not be used.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 vid,
+ enum mlxsw_reg_sfd_rec_action action,
+ u8 local_port)
+{
+ u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1);
+ mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0);
+ mlxsw_reg_sfd_rec_type_set(payload, rec_index,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST);
+ mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+ mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac);
+ mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
+ mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid);
+ mlxsw_reg_sfd_rec_action_set(payload, rec_index, action);
+ mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
+}
+
+static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
+ char *mac, u16 *p_vid,
+ u8 *p_local_port)
+{
+ mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
+ *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
+}
+
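A minimal usage sketch, not part of the patch: programming one static unicast FDB record with the pack helpers above, following the alloc/pack/write pattern the driver already uses for other registers (mlxsw_reg_write() is the existing core helper; the function name and the chosen policy/action are illustrative):

/* Illustrative only: add a static unicast FDB entry for (mac, fid) pointing
 * at local_port.  SFD payloads are large (up to 64 records), so allocate.
 */
static int example_fdb_uc_add(struct mlxsw_core *mlxsw_core, const char *mac,
			      u16 fid, u8 local_port)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      local_port);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}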
+/* SFN - Switch FDB Notification Register
+ * -------------------------------------------
+ * The switch provides notifications on newly learned FDB entries and
+ * aged out entries. The notifications can be polled by software.
+ */
+#define MLXSW_REG_SFN_ID 0x200B
+#define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFN_REC_MAX_COUNT 64
+#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \
+ MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sfn = {
+ .id = MLXSW_REG_SFN_ID,
+ .len = MLXSW_REG_SFN_LEN,
+};
+
+/* reg_sfn_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
+
+/* reg_sfn_num_rec
+ * Request: Number of learned notifications and aged-out notification
+ * records requested.
+ * Response: Number of notification records returned (must be smaller
+ * than or equal to the value requested)
+ * Ranges 0..64
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8);
+
+static inline void mlxsw_reg_sfn_pack(char *payload)
+{
+ MLXSW_REG_ZERO(sfn, payload);
+ mlxsw_reg_sfn_swid_set(payload, 0);
+ mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
+}
+
+/* reg_sfn_rec_swid
+ * Switch partition ID.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8,
+ MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfn_rec_type {
+ /* MAC addresses learned on a regular port. */
+ MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5,
+ /* Aged-out MAC address on a regular port */
+ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
+};
+
+/* reg_sfn_rec_type
+ * Notification record type.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
+ MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+/* reg_sfn_rec_mac
+ * MAC address.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
+ MLXSW_REG_SFN_REC_LEN, 0x02);
+
+/* reg_sfn_mac_sub_port
+ * VEPA channel on the local port.
+ * 0 if multichannel VEPA is not enabled.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
+ MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_fid
+ * Filtering identifier.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+ MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+ MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
+ char *mac, u16 *p_vid,
+ u8 *p_local_port)
+{
+ mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+ *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index);
+}
+
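A hedged sketch, not part of the patch, of how these notification records might be drained; it assumes the core's read-side register helper mlxsw_reg_query(), the query counterpart of mlxsw_reg_write():

/* Illustrative only: poll learned-MAC notifications and hand each record
 * to some consumer (here just a stub comment).
 */
static int example_fdb_notify_poll(struct mlxsw_core *mlxsw_core)
{
	char mac[6];
	char *sfn_pl;
	u8 local_port;
	u8 num_rec;
	u16 fid;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return -ENOMEM;

	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(sfn), sfn_pl);
	if (err)
		goto out;

	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++) {
		if (mlxsw_reg_sfn_rec_type_get(sfn_pl, i) !=
		    MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC)
			continue;
		mlxsw_reg_sfn_mac_unpack(sfn_pl, i, mac, &fid, &local_port);
		/* feed (mac, fid, local_port) to the bridge/FDB layer here */
	}
out:
	kfree(sfn_pl);
	return err;
}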
/* SPMS - Switch Port MSTP/RSTP State Register
* -------------------------------------------
* Configures the spanning tree state of a physical port.
*/
-#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_ID 0x200D
#define MLXSW_REG_SPMS_LEN 0x404
static const struct mlxsw_reg_info mlxsw_reg_spms = {
@@ -243,20 +540,166 @@ enum mlxsw_reg_spms_state {
*/
MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
-static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
- enum mlxsw_reg_spms_state state)
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
{
MLXSW_REG_ZERO(spms, payload);
mlxsw_reg_spms_local_port_set(payload, local_port);
+}
+
+static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
+ enum mlxsw_reg_spms_state state)
+{
mlxsw_reg_spms_state_set(payload, vid, state);
}
+/* SPVID - Switch Port VID
+ * -----------------------
+ * The switch port VID configures the default VID for a port.
+ */
+#define MLXSW_REG_SPVID_ID 0x200E
+#define MLXSW_REG_SPVID_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_spvid = {
+ .id = MLXSW_REG_SPVID_ID,
+ .len = MLXSW_REG_SPVID_LEN,
+};
+
+/* reg_spvid_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
+
+/* reg_spvid_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
+
+/* reg_spvid_pvid
+ * Port default VID
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
+
+static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
+{
+ MLXSW_REG_ZERO(spvid, payload);
+ mlxsw_reg_spvid_local_port_set(payload, local_port);
+ mlxsw_reg_spvid_pvid_set(payload, pvid);
+}
+
+/* SPVM - Switch Port VLAN Membership
+ * ----------------------------------
+ * The Switch Port VLAN Membership register configures the VLAN membership
+ * of a port in a VLAN denoted by VID. VLAN membership is managed per
+ * virtual port. The register can be used to add and remove VID(s) from a port.
+ */
+#define MLXSW_REG_SPVM_ID 0x200F
+#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
+ MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_spvm = {
+ .id = MLXSW_REG_SPVM_ID,
+ .len = MLXSW_REG_SPVM_LEN,
+};
+
+/* reg_spvm_pt
+ * Priority tagged. If this bit is set, packets forwarded to the port with
+ * untagged VLAN membership (u bit is set) will be tagged with priority tag
+ * (VID=0)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1);
+
+/* reg_spvm_pte
+ * Priority Tagged Update Enable. On Write operations, if this bit is cleared,
+ * the pt bit will NOT be updated. To update the pt bit, pte must be set.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
+
+/* reg_spvm_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8);
+
+/* reg_spvm_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8);
+
+/* reg_spvm_num_rec
+ * Number of records to update. Each record contains: i, e, u, vid.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8);
+
+/* reg_spvm_rec_i
+ * Ingress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_i,
+ MLXSW_REG_SPVM_BASE_LEN, 14, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_e
+ * Egress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_e,
+ MLXSW_REG_SPVM_BASE_LEN, 13, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_u
+ * Untagged - port is an untagged member - egress transmission uses untagged
+ * frames on VID<n>
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_u,
+ MLXSW_REG_SPVM_BASE_LEN, 12, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_vid
+ * VLAN ID to be added/removed from port or for querying.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
+ MLXSW_REG_SPVM_BASE_LEN, 0, 12,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
+ u16 vid_begin, u16 vid_end,
+ bool is_member, bool untagged)
+{
+ int size = vid_end - vid_begin + 1;
+ int i;
+
+ MLXSW_REG_ZERO(spvm, payload);
+ mlxsw_reg_spvm_local_port_set(payload, local_port);
+ mlxsw_reg_spvm_num_rec_set(payload, size);
+
+ for (i = 0; i < size; i++) {
+ mlxsw_reg_spvm_rec_i_set(payload, i, is_member);
+ mlxsw_reg_spvm_rec_e_set(payload, i, is_member);
+ mlxsw_reg_spvm_rec_u_set(payload, i, untagged);
+ mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i);
+ }
+}
+
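A brief sketch, not part of the patch: using the range-based pack helper above to make a port an untagged member of a block of VIDs (the VID range and function name are illustrative):

/* Illustrative only: add local_port as an untagged member of VIDs 10..20. */
static int example_port_vlans_add(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, local_port, 10, 20, true, true);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}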
/* SFGC - Switch Flooding Group Configuration
* ------------------------------------------
* The following register controls the association of flooding tables and MIDs
* to packet types used for flooding.
*/
-#define MLXSW_REG_SFGC_ID 0x2011
+#define MLXSW_REG_SFGC_ID 0x2011
#define MLXSW_REG_SFGC_LEN 0x10
static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
@@ -265,13 +708,15 @@ static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
};
enum mlxsw_reg_sfgc_type {
- MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
- MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
- MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
- MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
- MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
- MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
- MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+ MLXSW_REG_SFGC_TYPE_BROADCAST,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+ MLXSW_REG_SFGC_TYPE_RESERVED,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+ MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL,
+ MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST,
+ MLXSW_REG_SFGC_TYPE_MAX,
};
/* reg_sfgc_type
@@ -408,7 +853,7 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
unsigned int flood_table,
unsigned int index,
enum mlxsw_flood_table_type table_type,
- unsigned int range)
+ unsigned int range, u8 port, bool set)
{
MLXSW_REG_ZERO(sftr, payload);
mlxsw_reg_sftr_swid_set(payload, 0);
@@ -416,8 +861,8 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
mlxsw_reg_sftr_index_set(payload, index);
mlxsw_reg_sftr_table_type_set(payload, table_type);
mlxsw_reg_sftr_range_set(payload, range);
- mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
- mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+ mlxsw_reg_sftr_port_set(payload, port, set);
+ mlxsw_reg_sftr_port_mask_set(payload, port, 1);
}
/* SPMLR - Switch Port MAC Learning Register
@@ -473,6 +918,285 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
mlxsw_reg_spmlr_learn_mode_set(payload, mode);
}
+/* SVFA - Switch VID to FID Allocation Register
+ * --------------------------------------------
+ * Controls the VID to FID mapping and {Port, VID} to FID mapping for
+ * virtualized ports.
+ */
+#define MLXSW_REG_SVFA_ID 0x201C
+#define MLXSW_REG_SVFA_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_svfa = {
+ .id = MLXSW_REG_SVFA_ID,
+ .len = MLXSW_REG_SVFA_LEN,
+};
+
+/* reg_svfa_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
+
+/* reg_svfa_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_svfa_mt {
+ MLXSW_REG_SVFA_MT_VID_TO_FID,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+};
+
+/* reg_svfa_mapping_table
+ * Mapping table:
+ * 0 - VID to FID
+ * 1 - {Port, VID} to FID
+ * Access: Index
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3);
+
+/* reg_svfa_v
+ * Valid.
+ * Valid if set.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1);
+
+/* reg_svfa_fid
+ * Filtering ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16);
+
+/* reg_svfa_vid
+ * VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12);
+
+/* reg_svfa_counter_set_type
+ * Counter set type for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
+
+/* reg_svfa_counter_index
+ * Counter index for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
+
+static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_svfa_mt mt, bool valid,
+ u16 fid, u16 vid)
+{
+ MLXSW_REG_ZERO(svfa, payload);
+ local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
+ mlxsw_reg_svfa_swid_set(payload, 0);
+ mlxsw_reg_svfa_local_port_set(payload, local_port);
+ mlxsw_reg_svfa_mapping_table_set(payload, mt);
+ mlxsw_reg_svfa_v_set(payload, valid);
+ mlxsw_reg_svfa_fid_set(payload, fid);
+ mlxsw_reg_svfa_vid_set(payload, vid);
+}
+
+/* SVPE - Switch Virtual-Port Enabling Register
+ * --------------------------------------------
+ * Enables port virtualization.
+ */
+#define MLXSW_REG_SVPE_ID 0x201E
+#define MLXSW_REG_SVPE_LEN 0x4
+
+static const struct mlxsw_reg_info mlxsw_reg_svpe = {
+ .id = MLXSW_REG_SVPE_ID,
+ .len = MLXSW_REG_SVPE_LEN,
+};
+
+/* reg_svpe_local_port
+ * Local port number
+ * Access: Index
+ *
+ * Note: CPU port is not supported (uses VLAN mode only).
+ */
+MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
+
+/* reg_svpe_vp_en
+ * Virtual port enable.
+ * 0 - Disable, VLAN mode (VID to FID).
+ * 1 - Enable, Virtual port mode ({Port, VID} to FID).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
+
+static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
+ bool enable)
+{
+ MLXSW_REG_ZERO(svpe, payload);
+ mlxsw_reg_svpe_local_port_set(payload, local_port);
+ mlxsw_reg_svpe_vp_en_set(payload, enable);
+}
+
+/* SFMR - Switch FID Management Register
+ * -------------------------------------
+ * Creates and configures FIDs.
+ */
+#define MLXSW_REG_SFMR_ID 0x201F
+#define MLXSW_REG_SFMR_LEN 0x18
+
+static const struct mlxsw_reg_info mlxsw_reg_sfmr = {
+ .id = MLXSW_REG_SFMR_ID,
+ .len = MLXSW_REG_SFMR_LEN,
+};
+
+enum mlxsw_reg_sfmr_op {
+ MLXSW_REG_SFMR_OP_CREATE_FID,
+ MLXSW_REG_SFMR_OP_DESTROY_FID,
+};
+
+/* reg_sfmr_op
+ * Operation.
+ * 0 - Create or edit FID.
+ * 1 - Destroy FID.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
+
+/* reg_sfmr_fid
+ * Filtering ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
+
+/* reg_sfmr_fid_offset
+ * FID offset.
+ * Used to point into the flooding table selected by SFGC register if
+ * the table is of type FID-Offset. Otherwise, this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16);
+
+/* reg_sfmr_vtfp
+ * Valid Tunnel Flood Pointer.
+ * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1);
+
+/* reg_sfmr_nve_tunnel_flood_ptr
+ * Underlay Flooding and BC Pointer.
+ * Used as a pointer to the first entry of the group based link lists of
+ * flooding or BC entries (for NVE tunnels).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24);
+
+/* reg_sfmr_vv
+ * VNI Valid.
+ * If not set, then vni is reserved.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
+
+/* reg_sfmr_vni
+ * Virtual Network Identifier.
+ * Access: RW
+ *
+ * Note: A given VNI can only be assigned to one FID.
+ */
+MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
+
+static inline void mlxsw_reg_sfmr_pack(char *payload,
+ enum mlxsw_reg_sfmr_op op, u16 fid,
+ u16 fid_offset)
+{
+ MLXSW_REG_ZERO(sfmr, payload);
+ mlxsw_reg_sfmr_op_set(payload, op);
+ mlxsw_reg_sfmr_fid_set(payload, fid);
+ mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
+ mlxsw_reg_sfmr_vtfp_set(payload, false);
+ mlxsw_reg_sfmr_vv_set(payload, false);
+}
+
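Taken together, SVPE, SVFA and SFMR implement the {Port, VID} to FID mapping used for the vFIDs mentioned earlier. A hedged end-to-end sketch, not part of the patch (the helper name and FID numbering are illustrative):

/* Illustrative only: switch a port to virtual-port mode, create a FID and
 * map {local_port, vid} onto it.
 */
static int example_port_vid_to_fid_map(struct mlxsw_core *mlxsw_core,
				       u8 local_port, u16 vid, u16 fid)
{
	char svpe_pl[MLXSW_REG_SVPE_LEN];
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	char svfa_pl[MLXSW_REG_SVFA_LEN];
	int err;

	mlxsw_reg_svpe_pack(svpe_pl, local_port, true);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(svpe), svpe_pl);
	if (err)
		return err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	mlxsw_reg_svfa_pack(svfa_pl, local_port, MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
			    true, fid, vid);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(svfa), svfa_pl);
}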
+/* SPVMLR - Switch Port VLAN MAC Learning Register
+ * -----------------------------------------------
+ * Controls the switch MAC learning policy per {Port, VID}.
+ */
+#define MLXSW_REG_SPVMLR_ID 0x2020
+#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
+ MLXSW_REG_SPVMLR_REC_LEN * \
+ MLXSW_REG_SPVMLR_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_spvmlr = {
+ .id = MLXSW_REG_SPVMLR_ID,
+ .len = MLXSW_REG_SPVMLR_LEN,
+};
+
+/* reg_spvmlr_local_port
+ * Local ingress port.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8);
+
+/* reg_spvmlr_num_rec
+ * Number of records to update.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8);
+
+/* reg_spvmlr_rec_learn_enable
+ * 0 - Disable learning for {Port, VID}.
+ * 1 - Enable learning for {Port, VID}.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
+ 31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+/* reg_spvmlr_rec_vid
+ * VLAN ID to be added/removed from port or for querying.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
+ MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable)
+{
+ int num_rec = vid_end - vid_begin + 1;
+ int i;
+
+ WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT);
+
+ MLXSW_REG_ZERO(spvmlr, payload);
+ mlxsw_reg_spvmlr_local_port_set(payload, local_port);
+ mlxsw_reg_spvmlr_num_rec_set(payload, num_rec);
+
+ for (i = 0; i < num_rec; i++) {
+ mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable);
+ mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i);
+ }
+}
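
For illustration only (not part of the patch): since SPVMLR packs one record per VID, a caller could disable learning for a contiguous VID range in a single write through mlxsw_reg_write(), much like the single-VID helper in spectrum.c further below. A minimal sketch; the example_ function name is hypothetical.

/* Hypothetical sketch: disable learning for VIDs 100-103 on a port, using
 * only the SPVMLR helpers defined above. One record is packed per VID.
 */
static int example_spvmlr_learning_off(struct mlxsw_core *core, u8 local_port)
{
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, local_port, 100, 103, false);
	err = mlxsw_reg_write(core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
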
+
/* PMLP - Ports Module to Local Port Register
* ------------------------------------------
* Configures the assignment of modules to local ports.
@@ -1008,12 +1732,88 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
}
+/* PBMC - Port Buffer Management Control Register
+ * ----------------------------------------------
+ * The PBMC register configures and retrieves the port packet buffer
+ * allocation for different Prios, and the Pause threshold management.
+ */
+#define MLXSW_REG_PBMC_ID 0x500C
+#define MLXSW_REG_PBMC_LEN 0x68
+
+static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
+ .id = MLXSW_REG_PBMC_ID,
+ .len = MLXSW_REG_PBMC_LEN,
+};
+
+/* reg_pbmc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8);
+
+/* reg_pbmc_xoff_timer_value
+ * When device generates a pause frame, it uses this value as the pause
+ * timer (time for the peer port to pause in quota-512 bit time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
+
+/* reg_pbmc_xoff_refresh
+ * The time before a new pause frame should be sent to refresh the pause
+ * state. Uses the same units as xoff_timer_value above (in quota-512 bit
+ * time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
+
+/* reg_pbmc_buf_lossy
+ * The field indicates if the buffer is lossy.
+ * 0 - Lossless
+ * 1 - Lossy
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_epsb
+ * Eligible for Port Shared buffer.
+ * If epsb is set, packets assigned to the buffer are allowed to be placed in
+ * the port shared buffer.
+ * When buf_lossy is MLXSW_REG_PBMC_LOSSY_LOSSY this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_size
+ * The portion of the packet buffer array allocated to this specific buffer.
+ * Units are cells.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
+
+static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
+ u16 xoff_timer_value, u16 xoff_refresh)
+{
+ MLXSW_REG_ZERO(pbmc, payload);
+ mlxsw_reg_pbmc_local_port_set(payload, local_port);
+ mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value);
+ mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh);
+}
+
+static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
+ int buf_index,
+ u16 size)
+{
+ mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1);
+ mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+}
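
A minimal usage sketch (hypothetical helper name, not from the patch): configuring buffer 0 of a port as a lossy buffer of a given size in cells. The xoff timer and refresh values, expressed in 512-bit-time quanta, only matter for lossless buffers, so zeros are passed here; the driver's real buffer setup happens in mlxsw_sp_port_buffers_init(), called during port creation below.

/* Hypothetical sketch: make buffer 0 of a port lossy with a given size. */
static int example_pbmc_lossy_buf0(struct mlxsw_core *core, u8 local_port,
				   u16 size_cells)
{
	char pbmc_pl[MLXSW_REG_PBMC_LEN];

	mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0, 0);
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, 0, size_cells);
	return mlxsw_reg_write(core, MLXSW_REG(pbmc), pbmc_pl);
}
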
+
/* PSPA - Port Switch Partition Allocation
* ---------------------------------------
* Controls the association of a port with a switch partition and enables
* configuring ports as stacking ports.
*/
-#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_ID 0x500D
#define MLXSW_REG_PSPA_LEN 0x8
static const struct mlxsw_reg_info mlxsw_reg_pspa = {
@@ -1074,8 +1874,11 @@ MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
*/
MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
-#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0
-#define MLXSW_REG_HTGT_TRAP_GROUP_RX 0x1
+enum mlxsw_reg_htgt_trap_group {
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ MLXSW_REG_HTGT_TRAP_GROUP_CTRL,
+};
/* reg_htgt_trap_group
* Trap group number. User defined number specifying which trap groups
@@ -1142,6 +1945,7 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15
#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13
/* reg_htgt_local_path_rdq
* Receive descriptor queue (RDQ) to use for the trap group.
@@ -1149,21 +1953,29 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
*/
MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
-static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+static inline void mlxsw_reg_htgt_pack(char *payload,
+ enum mlxsw_reg_htgt_trap_group group)
{
u8 swid, rdq;
MLXSW_REG_ZERO(htgt, payload);
- if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) {
+ switch (group) {
+ case MLXSW_REG_HTGT_TRAP_GROUP_EMAD:
swid = MLXSW_PORT_SWID_ALL_SWIDS;
rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
- } else {
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_RX:
swid = 0;
rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_CTRL:
+ swid = 0;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL;
+ break;
}
mlxsw_reg_htgt_swid_set(payload, swid);
mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
- mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+ mlxsw_reg_htgt_trap_group_set(payload, group);
mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
mlxsw_reg_htgt_pid_set(payload, 0);
mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
@@ -1254,17 +2066,290 @@ enum {
*/
MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
-static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
- u8 trap_group, u16 trap_id)
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
{
+ enum mlxsw_reg_htgt_trap_group trap_group;
+
MLXSW_REG_ZERO(hpkt, payload);
mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
mlxsw_reg_hpkt_action_set(payload, action);
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_ETHEMAD:
+ case MLXSW_TRAP_ID_PUDE:
+ trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD;
+ break;
+ default:
+ trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX;
+ break;
+ }
mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
}
+/* SBPR - Shared Buffer Pools Register
+ * -----------------------------------
+ * The SBPR configures and retrieves the shared buffer pools and configuration.
+ */
+#define MLXSW_REG_SBPR_ID 0xB001
+#define MLXSW_REG_SBPR_LEN 0x14
+
+static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
+ .id = MLXSW_REG_SBPR_ID,
+ .len = MLXSW_REG_SBPR_LEN,
+};
+
+enum mlxsw_reg_sbpr_dir {
+ MLXSW_REG_SBPR_DIR_INGRESS,
+ MLXSW_REG_SBPR_DIR_EGRESS,
+};
+
+/* reg_sbpr_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2);
+
+/* reg_sbpr_pool
+ * Pool index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
+
+/* reg_sbpr_size
+ * Pool size in buffer cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24);
+
+enum mlxsw_reg_sbpr_mode {
+ MLXSW_REG_SBPR_MODE_STATIC,
+ MLXSW_REG_SBPR_MODE_DYNAMIC,
+};
+
+/* reg_sbpr_mode
+ * Pool quota calculation mode.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
+
+static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
+ enum mlxsw_reg_sbpr_dir dir,
+ enum mlxsw_reg_sbpr_mode mode, u32 size)
+{
+ MLXSW_REG_ZERO(sbpr, payload);
+ mlxsw_reg_sbpr_pool_set(payload, pool);
+ mlxsw_reg_sbpr_dir_set(payload, dir);
+ mlxsw_reg_sbpr_mode_set(payload, mode);
+ mlxsw_reg_sbpr_size_set(payload, size);
+}
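
A minimal sketch (hypothetical helper name) of how the pack helper above could be used to create ingress pool 0 as a static pool of a given size in cells:

/* Hypothetical sketch: configure ingress pool 0 as a static pool. */
static int example_sbpr_ingress_pool0(struct mlxsw_core *core, u32 size_cells)
{
	char sbpr_pl[MLXSW_REG_SBPR_LEN];

	mlxsw_reg_sbpr_pack(sbpr_pl, 0, MLXSW_REG_SBPR_DIR_INGRESS,
			    MLXSW_REG_SBPR_MODE_STATIC, size_cells);
	return mlxsw_reg_write(core, MLXSW_REG(sbpr), sbpr_pl);
}
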
+
+/* SBCM - Shared Buffer Class Management Register
+ * ----------------------------------------------
+ * The SBCM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-PG, including the binding to pool
+ * and definition of the associated quota.
+ */
+#define MLXSW_REG_SBCM_ID 0xB002
+#define MLXSW_REG_SBCM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbcm = {
+ .id = MLXSW_REG_SBCM_ID,
+ .len = MLXSW_REG_SBCM_LEN,
+};
+
+/* reg_sbcm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
+
+/* reg_sbcm_pg_buff
+ * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
+ * For PG buffer: range is 0..cap_max_pg_buffers - 1
+ * For traffic class: range is 0..cap_max_tclass - 1
+ * Note that when the traffic class is in MC aware mode, the MC aware
+ * traffic classes cannot be configured.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
+
+enum mlxsw_reg_sbcm_dir {
+ MLXSW_REG_SBCM_DIR_INGRESS,
+ MLXSW_REG_SBCM_DIR_EGRESS,
+};
+
+/* reg_sbcm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
+
+/* reg_sbcm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
+
+/* reg_sbcm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, this is the maximum buffer size for the limiter, in cells.
+ * When the pool is configured as dynamic, max_buff holds the "alpha"
+ * parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
+
+/* reg_sbcm_pool
+ * Association of the port-priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
+ enum mlxsw_reg_sbcm_dir dir,
+ u32 min_buff, u32 max_buff, u8 pool)
+{
+ MLXSW_REG_ZERO(sbcm, payload);
+ mlxsw_reg_sbcm_local_port_set(payload, local_port);
+ mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff);
+ mlxsw_reg_sbcm_dir_set(payload, dir);
+ mlxsw_reg_sbcm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbcm_max_buff_set(payload, max_buff);
+ mlxsw_reg_sbcm_pool_set(payload, pool);
+}
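
The dynamic "alpha" encoding of max_buff described above (and repeated for SBPM and SBMM below) is an exponent index: a value i in 1..14 maps to (1/128)*2^(i-1) of the free pool, so 8 decodes to 128/128 = 1.0 and 14 decodes to 8192/128 = 64. A hypothetical decoding helper, for illustration only:

/* Hypothetical sketch: decode a dynamic max_buff "alpha" index into a
 * fraction num/denom of the free pool. Valid indices are 1..14; 0 means
 * zero quota and 0xFF means no limit.
 */
static void example_sb_alpha_decode(u32 max_buff, u32 *num, u32 *denom)
{
	*denom = 128;
	if (max_buff == 0)
		*num = 0;
	else if (max_buff == 0xff)
		*num = U32_MAX;	/* "infinity" */
	else
		*num = 1 << (max_buff - 1);
}
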
+
+/* SBPM - Shared Buffer Class Management Register
+ * ----------------------------------------------
+ * The SBPM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-Pool, including the definition
+ * of the associated quota.
+ */
+#define MLXSW_REG_SBPM_ID 0xB003
+#define MLXSW_REG_SBPM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbpm = {
+ .id = MLXSW_REG_SBPM_ID,
+ .len = MLXSW_REG_SBPM_LEN,
+};
+
+/* reg_sbpm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
+
+/* reg_sbpm_pool
+ * The pool associated to quota counting on the local_port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
+
+enum mlxsw_reg_sbpm_dir {
+ MLXSW_REG_SBPM_DIR_INGRESS,
+ MLXSW_REG_SBPM_DIR_EGRESS,
+};
+
+/* reg_sbpm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
+
+/* reg_sbpm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
+
+/* reg_sbpm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, this is the maximum buffer size for the limiter, in cells.
+ * When the pool is configured as dynamic, max_buff holds the "alpha"
+ * parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
+ enum mlxsw_reg_sbpm_dir dir,
+ u32 min_buff, u32 max_buff)
+{
+ MLXSW_REG_ZERO(sbpm, payload);
+ mlxsw_reg_sbpm_local_port_set(payload, local_port);
+ mlxsw_reg_sbpm_pool_set(payload, pool);
+ mlxsw_reg_sbpm_dir_set(payload, dir);
+ mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
+}
+
+/* SBMM - Shared Buffer Multicast Management Register
+ * --------------------------------------------------
+ * The SBMM register configures and retrieves the shared buffer allocation
+ * and configuration for MC packets according to Switch-Priority, including
+ * the binding to pool and definition of the associated quota.
+ */
+#define MLXSW_REG_SBMM_ID 0xB004
+#define MLXSW_REG_SBMM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbmm = {
+ .id = MLXSW_REG_SBMM_ID,
+ .len = MLXSW_REG_SBMM_LEN,
+};
+
+/* reg_sbmm_prio
+ * Switch Priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4);
+
+/* reg_sbmm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24);
+
+/* reg_sbmm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, this is the maximum buffer size for the limiter, in cells.
+ * When the pool is configured as dynamic, max_buff holds the "alpha"
+ * parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24);
+
+/* reg_sbmm_pool
+ * Association of the port-priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
+ u32 max_buff, u8 pool)
+{
+ MLXSW_REG_ZERO(sbmm, payload);
+ mlxsw_reg_sbmm_prio_set(payload, prio);
+ mlxsw_reg_sbmm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbmm_max_buff_set(payload, max_buff);
+ mlxsw_reg_sbmm_pool_set(payload, pool);
+}
+
static inline const char *mlxsw_reg_id_str(u16 reg_id)
{
switch (reg_id) {
@@ -1272,18 +2357,34 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SGCR";
case MLXSW_REG_SPAD_ID:
return "SPAD";
- case MLXSW_REG_SMID_ID:
- return "SMID";
case MLXSW_REG_SSPR_ID:
return "SSPR";
+ case MLXSW_REG_SFDAT_ID:
+ return "SFDAT";
+ case MLXSW_REG_SFD_ID:
+ return "SFD";
+ case MLXSW_REG_SFN_ID:
+ return "SFN";
case MLXSW_REG_SPMS_ID:
return "SPMS";
+ case MLXSW_REG_SPVID_ID:
+ return "SPVID";
+ case MLXSW_REG_SPVM_ID:
+ return "SPVM";
case MLXSW_REG_SFGC_ID:
return "SFGC";
case MLXSW_REG_SFTR_ID:
return "SFTR";
case MLXSW_REG_SPMLR_ID:
return "SPMLR";
+ case MLXSW_REG_SVFA_ID:
+ return "SVFA";
+ case MLXSW_REG_SVPE_ID:
+ return "SVPE";
+ case MLXSW_REG_SFMR_ID:
+ return "SFMR";
+ case MLXSW_REG_SPVMLR_ID:
+ return "SPVMLR";
case MLXSW_REG_PMLP_ID:
return "PMLP";
case MLXSW_REG_PMTU_ID:
@@ -1296,12 +2397,22 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "PAOS";
case MLXSW_REG_PPCNT_ID:
return "PPCNT";
+ case MLXSW_REG_PBMC_ID:
+ return "PBMC";
case MLXSW_REG_PSPA_ID:
return "PSPA";
case MLXSW_REG_HTGT_ID:
return "HTGT";
case MLXSW_REG_HPKT_ID:
return "HPKT";
+ case MLXSW_REG_SBPR_ID:
+ return "SBPR";
+ case MLXSW_REG_SBCM_ID:
+ return "SBCM";
+ case MLXSW_REG_SBPM_ID:
+ return "SBPM";
+ case MLXSW_REG_SBMM_ID:
+ return "SBMM";
default:
return "*UNKNOWN*";
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
new file mode 100644
index 000000000000..3be4a2355ead
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -0,0 +1,1949 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/bitops.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp_driver_version[] = "1.0";
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+ mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN];
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
+ return 0;
+}
+
+static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_up)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
+ is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+ MLXSW_PORT_ADMIN_STATUS_DOWN);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool *p_is_up)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+ u8 oper_status;
+ int err;
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+ if (err)
+ return err;
+ oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+ *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
+ return 0;
+}
+
+static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ int err;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+ MLXSW_SP_VFID_BASE + vfid, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+
+ if (err)
+ return err;
+
+ set_bit(vfid, mlxsw_sp->active_vfids);
+ return 0;
+}
+
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ clear_bit(vfid, mlxsw_sp->active_vfids);
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+ MLXSW_SP_VFID_BASE + vfid, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ unsigned char *addr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ppad_pl[MLXSW_REG_PPAD_LEN];
+
+ mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
+ mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
+
+ ether_addr_copy(addr, mlxsw_sp->base_mac);
+ addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
+ return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, enum mlxsw_reg_spms_state state)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spms_pl;
+ int err;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ int max_mtu;
+ int err;
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+ if (err)
+ return err;
+ max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+ if (mtu > max_mtu)
+ return -EINVAL;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+ mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char svpe_pl[MLXSW_REG_SVPE_LEN];
+
+ mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
+}
+
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
+ fid, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, bool learn_enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spvmlr_pl;
+ int err;
+
+ spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
+ if (!spvmlr_pl)
+ return -ENOMEM;
+ mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
+ learn_enable);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
+ kfree(spvmlr_pl);
+ return err;
+}
+
+static int
+mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+ mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool *p_usable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
+ return 0;
+}
+
+static int mlxsw_sp_port_open(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ if (err)
+ return err;
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int mlxsw_sp_port_stop(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+}
+
+static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+ const struct mlxsw_tx_info tx_info = {
+ .local_port = mlxsw_sp_port->local_port,
+ .is_emad = false,
+ };
+ u64 len;
+ int err;
+
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
+ return NETDEV_TX_BUSY;
+
+ if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+ struct sk_buff *skb_orig = skb;
+
+ skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ if (eth_skb_pad(skb)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ return NETDEV_TX_OK;
+ }
+
+ mlxsw_sp_txhdr_construct(skb, &tx_info);
+ len = skb->len;
+ /* Due to a race we might fail here because of a full queue. In that
+ * unlikely case we simply drop the packet.
+ */
+ err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
+
+ if (!err) {
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
+ if (err)
+ return err;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+ if (err)
+ return err;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_port_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ u32 tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* tx_dropped is u32, updated without syncp protection. */
+ tx_dropped += p->tx_dropped;
+ }
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
+
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spvm_pl;
+ int err;
+
+ spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
+ if (!spvm_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
+ vid_end, is_member, untagged);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
+ kfree(spvm_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ u16 vid, last_visited_vid;
+ int err;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
+ vid);
+ if (err) {
+ last_visited_vid = vid;
+ goto err_port_vid_to_fid_set;
+ }
+ }
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
+ if (err) {
+ last_visited_vid = VLAN_N_VID;
+ goto err_port_vid_to_fid_set;
+ }
+
+ return 0;
+
+err_port_vid_to_fid_set:
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+ mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
+ vid);
+ return err;
+}
+
+static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ u16 vid;
+ int err;
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ if (err)
+ return err;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
+ vid, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+ u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *sftr_pl;
+ int err;
+
+ /* VLAN 0 is added to HW filter when device goes up, but it is
+ * reserved in our case, so simply return.
+ */
+ if (!vid)
+ return 0;
+
+ if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
+ netdev_warn(dev, "VID=%d already configured\n", vid);
+ return 0;
+ }
+
+ if (!test_bit(vid, mlxsw_sp->active_vfids)) {
+ err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
+ if (err) {
+ netdev_err(dev, "Failed to create vFID=%d\n",
+ MLXSW_SP_VFID_BASE + vid);
+ return err;
+ }
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl) {
+ err = -ENOMEM;
+ goto err_flood_table_alloc;
+ }
+ mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
+ MLXSW_PORT_CPU_PORT, true);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+ kfree(sftr_pl);
+ if (err) {
+ netdev_err(dev, "Failed to configure flood table\n");
+ goto err_flood_table_config;
+ }
+ }
+
+ /* In case we fail in the following steps, we intentionally do not
+ * destroy the associated vFID.
+ */
+
+ /* When adding the first VLAN interface on a bridged port we need to
+ * transition all the active 802.1Q bridge VLANs to use explicit
+ * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
+ */
+ if (!mlxsw_sp_port->nr_vfids) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err) {
+ netdev_err(dev, "Failed to set to Virtual mode\n");
+ return err;
+ }
+ }
+
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ true, MLXSW_SP_VFID_BASE + vid, vid);
+ if (err) {
+ netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
+ vid, MLXSW_SP_VFID_BASE + vid);
+ goto err_port_vid_to_fid_set;
+ }
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (err) {
+ netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+ goto err_port_vid_learning_set;
+ }
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
+ if (err) {
+ netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
+ vid);
+ goto err_port_add_vid;
+ }
+
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+ MLXSW_REG_SPMS_STATE_FORWARDING);
+ if (err) {
+ netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
+ goto err_port_stp_state_set;
+ }
+
+ mlxsw_sp_port->nr_vfids++;
+ set_bit(vid, mlxsw_sp_port->active_vfids);
+
+ return 0;
+
+err_flood_table_config:
+err_flood_table_alloc:
+ mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
+ return err;
+
+err_port_stp_state_set:
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+err_port_add_vid:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+err_port_vid_learning_set:
+ mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
+ MLXSW_SP_VFID_BASE + vid, vid);
+err_port_vid_to_fid_set:
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ return err;
+}
+
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ /* VLAN 0 is removed from HW filter when device goes down, but
+ * it is reserved in our case, so simply return.
+ */
+ if (!vid)
+ return 0;
+
+ if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
+ netdev_warn(dev, "VID=%d does not exist\n", vid);
+ return 0;
+ }
+
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+ MLXSW_REG_SPMS_STATE_DISCARDING);
+ if (err) {
+ netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
+ return err;
+ }
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ if (err) {
+ netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
+ vid);
+ return err;
+ }
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+ if (err) {
+ netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
+ return err;
+ }
+
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ false, MLXSW_SP_VFID_BASE + vid,
+ vid);
+ if (err) {
+ netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
+ vid, MLXSW_SP_VFID_BASE + vid);
+ return err;
+ }
+
+ /* When removing the last VLAN interface on a bridged port we need to
+ * transition all active 802.1Q bridge VLANs to use VID to FID
+ * mappings and set port's mode to VLAN mode.
+ */
+ if (mlxsw_sp_port->nr_vfids == 1) {
+ err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ if (err) {
+ netdev_err(dev, "Failed to set to VLAN mode\n");
+ return err;
+ }
+ }
+
+ mlxsw_sp_port->nr_vfids--;
+ clear_bit(vid, mlxsw_sp_port->active_vfids);
+
+ return 0;
+}
+
+static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
+ .ndo_open = mlxsw_sp_port_open,
+ .ndo_stop = mlxsw_sp_port_stop,
+ .ndo_start_xmit = mlxsw_sp_port_xmit,
+ .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
+ .ndo_change_mtu = mlxsw_sp_port_change_mtu,
+ .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
+ .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
+ .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+};
+
+static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mlxsw_sp_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_sp->bus_info->fw_rev.major,
+ mlxsw_sp->bus_info->fw_rev.minor,
+ mlxsw_sp->bus_info->fw_rev.subminor);
+ strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sp_port_hw_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
+ {
+ .str = "a_frames_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+ },
+ {
+ .str = "a_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+ },
+ {
+ .str = "a_frame_check_sequence_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+ },
+ {
+ .str = "a_alignment_errors",
+ .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+ },
+ {
+ .str = "a_octets_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+ },
+ {
+ .str = "a_octets_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+ },
+ {
+ .str = "a_in_range_length_errors",
+ .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+ },
+ {
+ .str = "a_out_of_range_length_field",
+ .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+ },
+ {
+ .str = "a_frame_too_long_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+ },
+ {
+ .str = "a_symbol_error_during_carrier",
+ .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+ },
+ {
+ .str = "a_mac_control_frames_transmitted",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+ },
+ {
+ .str = "a_mac_control_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+ },
+ {
+ .str = "a_unsupported_opcodes_received",
+ .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_xmitted",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+
+static void mlxsw_sp_port_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void mlxsw_sp_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
+ data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return MLXSW_SP_PORT_HW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+struct mlxsw_sp_port_link_mode {
+ u32 mask;
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+};
+
+static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .speed = 25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .speed = 50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .speed = 100000,
+ },
+};
+
+#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
+
+static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return SUPPORTED_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+ return SUPPORTED_Backplane;
+ return 0;
+}
+
+static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
+ modes |= mlxsw_sp_port_link_mode[i].supported;
+ }
+ return modes;
+}
+
+static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
+ modes |= mlxsw_sp_port_link_mode[i].advertised;
+ }
+ return modes;
+}
+
+static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_cmd *cmd)
+{
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+ int i;
+
+ if (!carrier_ok)
+ goto out;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
+ speed = mlxsw_sp_port_link_mode[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return PORT_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+ return PORT_DA;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+ return PORT_NONE;
+
+ return PORT_OTHER;
+}
+
+static int mlxsw_sp_port_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_oper;
+ int err;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
+
+ cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
+ mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
+ mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+ cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
+ cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+ ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static u32 mlxsw_sp_to_ptys_speed(u32 speed)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (speed == mlxsw_sp_port_link_mode[i].speed)
+ ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static int mlxsw_sp_port_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 speed;
+ u32 eth_proto_new;
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ bool is_up;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+ mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
+ mlxsw_sp_to_ptys_speed(speed);
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+ eth_proto_new = eth_proto_new & eth_proto_cap;
+ if (!eth_proto_new) {
+ netdev_err(dev, "Not supported proto admin requested");
+ return -EINVAL;
+ }
+ if (eth_proto_new == eth_proto_admin)
+ return 0;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to set proto admin");
+ return err;
+ }
+
+ err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
+ if (err) {
+ netdev_err(dev, "Failed to get oper status");
+ return err;
+ }
+ if (!is_up)
+ return 0;
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
+ .get_drvinfo = mlxsw_sp_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlxsw_sp_port_get_strings,
+ .get_ethtool_stats = mlxsw_sp_port_get_stats,
+ .get_sset_count = mlxsw_sp_port_get_sset_count,
+ .get_settings = mlxsw_sp_port_get_settings,
+ .set_settings = mlxsw_sp_port_set_settings,
+};
+
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct net_device *dev;
+ bool usable;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
+ if (!dev)
+ return -ENOMEM;
+ mlxsw_sp_port = netdev_priv(dev);
+ mlxsw_sp_port->dev = dev;
+ mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
+ mlxsw_sp_port->local_port = local_port;
+ mlxsw_sp_port->learning = 1;
+ mlxsw_sp_port->learning_sync = 1;
+ mlxsw_sp_port->uc_flood = 1;
+ mlxsw_sp_port->pvid = 1;
+
+ mlxsw_sp_port->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
+ if (!mlxsw_sp_port->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
+ dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+
+ err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
+ mlxsw_sp_port->local_port);
+ goto err_dev_addr_init;
+ }
+
+ netif_carrier_off(dev);
+
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ /* Each packet needs to have a Tx header (metadata) on top of all other
+ * headers.
+ */
+ dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+ err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_module_check;
+ }
+
+ if (!usable) {
+ dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+ mlxsw_sp_port->local_port);
+ goto port_not_usable;
+ }
+
+ err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_system_port_mapping_set;
+ }
+
+ err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_mtu_set;
+ }
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ if (err)
+ goto err_port_admin_status_set;
+
+ err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_buffers_init;
+ }
+
+ mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
+ mlxsw_sp_port->local_port);
+ goto err_register_netdev;
+ }
+
+ err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_vlan_init;
+
+ mlxsw_sp->ports[local_port] = mlxsw_sp_port;
+ return 0;
+
+err_port_vlan_init:
+ unregister_netdev(dev);
+err_register_netdev:
+err_port_buffers_init:
+err_port_admin_status_set:
+err_port_mtu_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_init:
+ free_percpu(mlxsw_sp_port->pcpu_stats);
+err_alloc_stats:
+ free_netdev(dev);
+ return err;
+}
+
+static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 vfid;
+
+ for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
+ mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
+}
+
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ if (!mlxsw_sp_port)
+ return;
+ mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+ unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ free_percpu(mlxsw_sp_port->pcpu_stats);
+ free_netdev(mlxsw_sp_port->dev);
+}
+
+static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+ kfree(mlxsw_sp->ports);
+}
+
+static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
+ mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_sp->ports)
+ return -ENOMEM;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, i);
+ if (err)
+ goto err_port_create;
+ }
+ return 0;
+
+err_port_create:
+ for (i--; i >= 1; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+ kfree(mlxsw_sp->ports);
+ return err;
+}
+
+static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
+ char *pude_pl, void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ enum mlxsw_reg_pude_oper_status status;
+ u8 local_port;
+
+ local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ status = mlxsw_reg_pude_oper_status_get(pude_pl);
+ if (status == MLXSW_PORT_OPER_STATUS_UP) {
+ netdev_info(mlxsw_sp_port->dev, "link up\n");
+ netif_carrier_on(mlxsw_sp_port->dev);
+ } else {
+ netdev_info(mlxsw_sp_port->dev, "link down\n");
+ netif_carrier_off(mlxsw_sp_port->dev);
+ }
+}
+
+static struct mlxsw_event_listener mlxsw_sp_pude_event = {
+ .func = mlxsw_sp_pude_event_func,
+ .trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sp_pude_event;
+ break;
+ }
+ err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_event_trap_set;
+
+ return 0;
+
+err_event_trap_set:
+ mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
+ return err;
+}
+
+static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sp_pude_event;
+ break;
+ }
+ mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
+}
+
+static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ skb->dev = mlxsw_sp_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ netif_receive_skb(skb);
+}
+
+static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_FDB_MC,
+ },
+ /* Traps for specific L2 packet types, not trapped as FDB MC */
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_STP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LACP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_EAPOL,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LLDP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MMRP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MVRP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_RPVST,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_DHCP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+ },
+};
+
+static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+ int err;
+
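+	/* Set up the RX and CTRL trap groups before registering listeners */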
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
+ err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+ if (err)
+ goto err_rx_listener_register;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ mlxsw_sp_rx_listener[i].trap_id);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_rx_trap_set;
+ }
+ return 0;
+
+err_rx_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+err_rx_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_sp_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+ }
+ return err;
+}
+
+static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_sp_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+ }
+}
+
+static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_reg_sfgc_type type,
+ enum mlxsw_reg_sfgc_bridge_type bridge_type)
+{
+ enum mlxsw_flood_table_type table_type;
+ enum mlxsw_sp_flood_table flood_table;
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+
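+	/* vFIDs use the single FID-based flood table, while 802.1Q FIDs use
+	 * FID-offset tables: one for unknown unicast and another for
+	 * broadcast and unregistered multicast.
+	 */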
+ if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
+ table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
+ flood_table = 0;
+ } else {
+ table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+ if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
+ flood_table = MLXSW_SP_FLOOD_TABLE_UC;
+ else
+ flood_table = MLXSW_SP_FLOOD_TABLE_BM;
+ }
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
+ flood_table);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
+}
+
+static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int type, err;
+
+ /* For non-offloaded netdevs, flood all traffic types to CPU
+ * port.
+ */
+ for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
+ if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
+ continue;
+
+ err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
+ if (err)
+ return err;
+ }
+
+ /* For bridged ports, use one flooding table for unknown unicast
+ * traffic and a second table for unregistered multicast and
+ * broadcast.
+ */
+ for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
+ if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
+ continue;
+
+ err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int err;
+
+ mlxsw_sp->core = mlxsw_core;
+ mlxsw_sp->bus_info = mlxsw_bus_info;
+
+ err = mlxsw_sp_base_mac_get(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
+ return err;
+ }
+
+ err = mlxsw_sp_ports_create(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
+ goto err_ports_create;
+ }
+
+ err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
+ goto err_event_register;
+ }
+
+ err = mlxsw_sp_traps_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
+ goto err_rx_listener_register;
+ }
+
+ err = mlxsw_sp_flood_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
+ goto err_flood_init;
+ }
+
+ err = mlxsw_sp_buffers_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
+ goto err_buffers_init;
+ }
+
+ err = mlxsw_sp_switchdev_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
+ goto err_switchdev_init;
+ }
+
+ return 0;
+
+err_switchdev_init:
+err_buffers_init:
+err_flood_init:
+ mlxsw_sp_traps_fini(mlxsw_sp);
+err_rx_listener_register:
+ mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+ mlxsw_sp_ports_remove(mlxsw_sp);
+err_ports_create:
+ mlxsw_sp_vfids_fini(mlxsw_sp);
+ return err;
+}
+
+static void mlxsw_sp_fini(void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_traps_fini(mlxsw_sp);
+ mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+ mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_vfids_fini(mlxsw_sp);
+}
+
+static struct mlxsw_config_profile mlxsw_sp_config_profile = {
+ .used_max_vepa_channels = 1,
+ .max_vepa_channels = 0,
+ .used_max_lag = 1,
+ .max_lag = 64,
+ .used_max_port_per_lag = 1,
+ .max_port_per_lag = 16,
+ .used_max_mid = 1,
+ .max_mid = 7000,
+ .used_max_pgt = 1,
+ .max_pgt = 0,
+ .used_max_system_port = 1,
+ .max_system_port = 64,
+ .used_max_vlan_groups = 1,
+ .max_vlan_groups = 127,
+ .used_max_regions = 1,
+ .max_regions = 400,
+ .used_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .max_fid_offset_flood_tables = 2,
+ .fid_offset_flood_table_size = VLAN_N_VID - 1,
+ .max_fid_flood_tables = 1,
+ .fid_flood_table_size = VLAN_N_VID,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
+static struct mlxsw_driver mlxsw_sp_driver = {
+ .kind = MLXSW_DEVICE_KIND_SPECTRUM,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp_init,
+ .fini = mlxsw_sp_fini,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp_config_profile,
+};
+
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+}
+
+static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+	/* When the port is not bridged, untagged packets are tagged with
+	 * PVID=VID=1, thereby creating an implicit VLAN interface in
+	 * the device. Remove it and let the bridge code take care of
+	 * its own VLANs.
+	 */
+ err = mlxsw_sp_port_kill_vid(dev, 0, 1);
+ if (err)
+ netdev_err(dev, "Failed to remove VID 1\n");
+
+ return err;
+}
+
+static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* Add implicit VLAN interface in the device, so that untagged
+ * packets will be classified to the default vFID.
+ */
+ err = mlxsw_sp_port_add_vid(dev, 0, 1);
+ if (err)
+ netdev_err(dev, "Failed to add VID 1\n");
+
+ return err;
+}
+
+static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ return !mlxsw_sp->master_bridge.dev ||
+ mlxsw_sp->master_bridge.dev == br_dev;
+}
+
+static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ mlxsw_sp->master_bridge.dev = br_dev;
+ mlxsw_sp->master_bridge.ref_count++;
+}
+
+static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ if (--mlxsw_sp->master_bridge.ref_count == 0)
+ mlxsw_sp->master_bridge.dev = NULL;
+}
+
+static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct net_device *upper_dev;
+ struct mlxsw_sp *mlxsw_sp;
+ int err;
+
+ if (!mlxsw_sp_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ mlxsw_sp_port = netdev_priv(dev);
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ info = ptr;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+		/* HW limitation forbids putting a port in multiple bridges. */
+ if (info->master && info->linking &&
+ netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
+ return NOTIFY_BAD;
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->master &&
+ netif_is_bridge_master(upper_dev)) {
+ if (info->linking) {
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to join bridge\n");
+ mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
+ mlxsw_sp_port->bridged = 1;
+ } else {
+ err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to leave bridge\n");
+ mlxsw_sp_port->bridged = 0;
+ mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
+ }
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_netdevice_event,
+};
+
+static int __init mlxsw_sp_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ err = mlxsw_core_driver_register(&mlxsw_sp_driver);
+ if (err)
+ goto err_core_driver_register;
+ return 0;
+
+err_core_driver_register:
+ unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ return err;
+}
+
+static void __exit mlxsw_sp_module_exit(void)
+{
+ mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+}
+
+module_init(mlxsw_sp_module_init);
+module_exit(mlxsw_sp_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Spectrum driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
new file mode 100644
index 000000000000..4365c8bccc6d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -0,0 +1,122 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_H
+#define _MLXSW_SPECTRUM_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+
+#include "core.h"
+
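+/* vFIDs are allocated starting right above the 4K VLAN range */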
+#define MLXSW_SP_VFID_BASE VLAN_N_VID
+
+struct mlxsw_sp_port;
+
+struct mlxsw_sp {
+ unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+ unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
+ struct mlxsw_sp_port **ports;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ unsigned char base_mac[ETH_ALEN];
+ struct {
+ struct delayed_work dw;
+#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+ unsigned int interval; /* ms */
+ } fdb_notify;
+#define MLXSW_SP_DEFAULT_AGEING_TIME 300
+ u32 ageing_time;
+ struct {
+ struct net_device *dev;
+ unsigned int ref_count;
+ } master_bridge;
+};
+
+struct mlxsw_sp_port_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 tx_dropped;
+};
+
+struct mlxsw_sp_port {
+ struct net_device *dev;
+ struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
+ struct mlxsw_sp *mlxsw_sp;
+ u8 local_port;
+ u8 stp_state;
+ u8 learning:1,
+ learning_sync:1,
+ uc_flood:1,
+ bridged:1;
+ u16 pvid;
+ /* 802.1Q bridge VLANs */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ /* VLAN interfaces */
+ unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 nr_vfids;
+};
+
+enum mlxsw_sp_flood_table {
+ MLXSW_SP_FLOOD_TABLE_UC,
+ MLXSW_SP_FLOOD_TABLE_BM,
+};
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+ u16 vid);
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged);
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+ u16 vid);
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
new file mode 100644
index 000000000000..d59195e3f7fb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -0,0 +1,422 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "port.h"
+#include "reg.h"
+
+struct mlxsw_sp_pb {
+ u8 index;
+ u16 size;
+};
+
+#define MLXSW_SP_PB(_index, _size) \
+ { \
+ .index = _index, \
+ .size = _size, \
+ }
+
+static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
+ MLXSW_SP_PB(0, 208),
+ MLXSW_SP_PB(1, 208),
+ MLXSW_SP_PB(2, 208),
+ MLXSW_SP_PB(3, 208),
+ MLXSW_SP_PB(4, 208),
+ MLXSW_SP_PB(5, 208),
+ MLXSW_SP_PB(6, 208),
+ MLXSW_SP_PB(7, 208),
+ MLXSW_SP_PB(9, 208),
+};
+
+#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+
+static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ int i;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
+ 0xffff, 0xffff / 2);
+ for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
+ const struct mlxsw_sp_pb *pb;
+
+ pb = &mlxsw_sp_pbs[i];
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+ }
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+ MLXSW_REG(pbmc), pbmc_pl);
+}
+
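+/* Shared buffer sizes below are specified in units of 96-byte cells */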
+#define MLXSW_SP_SB_BYTES_PER_CELL 96
+
+struct mlxsw_sp_sb_pool {
+ u8 pool;
+ enum mlxsw_reg_sbpr_dir dir;
+ enum mlxsw_reg_sbpr_mode mode;
+ u32 size;
+};
+
+#define MLXSW_SP_SB_POOL_INGRESS_SIZE \
+ ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \
+ MLXSW_SP_SB_BYTES_PER_CELL)
+#define MLXSW_SP_SB_POOL_EGRESS_SIZE \
+ ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \
+ MLXSW_SP_SB_BYTES_PER_CELL)
+
+#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \
+ { \
+ .pool = _pool, \
+ .dir = _dir, \
+ .mode = _mode, \
+ .size = _size, \
+ }
+
+#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \
+ MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \
+ MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \
+ MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \
+ MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
+ MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE),
+ MLXSW_SP_SB_POOL_INGRESS(1, 0),
+ MLXSW_SP_SB_POOL_INGRESS(2, 0),
+ MLXSW_SP_SB_POOL_INGRESS(3, 0),
+ MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+ MLXSW_SP_SB_POOL_EGRESS(1, 0),
+ MLXSW_SP_SB_POOL_EGRESS(2, 0),
+ MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+};
+
+#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
+
+static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char sbpr_pl[MLXSW_REG_SBPR_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) {
+ const struct mlxsw_sp_sb_pool *pool;
+
+ pool = &mlxsw_sp_sb_pools[i];
+ mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,
+ pool->mode, pool->size);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+struct mlxsw_sp_sb_cm {
+ union {
+ u8 pg;
+ u8 tc;
+ } u;
+ enum mlxsw_reg_sbcm_dir dir;
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+};
+
+#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \
+ { \
+ .u.pg = _pg_tc, \
+ .dir = _dir, \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
+ }
+
+#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \
+ MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \
+ _min_buff, _max_buff, 0)
+
+#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \
+ MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \
+ _min_buff, _max_buff, 0)
+
+#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \
+ MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
+ MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8),
+ MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(4, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff),
+ MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(11, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(12, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(13, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(14, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(15, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff),
+};
+
+#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31),
+};
+
+#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
+ ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
+
+static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const struct mlxsw_sp_sb_cm *cms,
+ size_t cms_len)
+{
+ char sbcm_pl[MLXSW_REG_SBCM_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < cms_len; i++) {
+ const struct mlxsw_sp_sb_cm *cm;
+
+ cm = &cms[i];
+ mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir,
+ cm->min_buff, cm->max_buff, cm->pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port, mlxsw_sp_sb_cms,
+ MLXSW_SP_SB_CMS_LEN);
+}
+
+static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms,
+ MLXSW_SP_CPU_PORT_SB_MCS_LEN);
+}
+
+struct mlxsw_sp_sb_pm {
+ u8 pool;
+ enum mlxsw_reg_sbpm_dir dir;
+ u32 min_buff;
+ u32 max_buff;
+};
+
+#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \
+ { \
+ .pool = _pool, \
+ .dir = _dir, \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ }
+
+#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \
+ MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \
+ _min_buff, _max_buff)
+
+#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \
+ MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \
+ _min_buff, _max_buff)
+
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
+ MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff),
+ MLXSW_SP_SB_PM_INGRESS(1, 0, 0),
+ MLXSW_SP_SB_PM_INGRESS(2, 0, 0),
+ MLXSW_SP_SB_PM_INGRESS(3, 0, 0),
+ MLXSW_SP_SB_PM_EGRESS(0, 0, 7),
+ MLXSW_SP_SB_PM_EGRESS(1, 0, 0),
+ MLXSW_SP_SB_PM_EGRESS(2, 0, 0),
+ MLXSW_SP_SB_PM_EGRESS(3, 0, 0),
+};
+
+#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+ const struct mlxsw_sp_sb_pm *pm;
+
+ pm = &mlxsw_sp_sb_pms[i];
+ mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port,
+ pm->pool, pm->dir,
+ pm->min_buff, pm->max_buff);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+ MLXSW_REG(sbpm), sbpm_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+struct mlxsw_sp_sb_mm {
+ u8 prio;
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+};
+
+#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \
+ { \
+ .prio = _prio, \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
+ }
+
+static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
+ MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
+
+static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char sbmm_pl[MLXSW_REG_SBMM_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
+ const struct mlxsw_sp_sb_mm *mc;
+
+ mc = &mlxsw_sp_sb_mms[i];
+ mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+ mc->max_buff, mc->pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+ if (err)
+ return err;
+ err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
+ if (err)
+ return err;
+ err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+
+ return err;
+}
+
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
new file mode 100644
index 000000000000..617fb22b5d81
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -0,0 +1,903 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <net/switchdev.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
+static int mlxsw_sp_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
+ memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
+ attr->u.ppid.id_len);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ attr->u.brport_flags =
+ (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
+ (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
+ (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 state)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_reg_spms_state spms_state;
+ char *spms_pl;
+ u16 vid;
+ int err;
+
+ switch (state) {
+ case BR_STATE_DISABLED: /* fall-through */
+ case BR_STATE_FORWARDING:
+ spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
+ break;
+ case BR_STATE_LISTENING: /* fall-through */
+ case BR_STATE_LEARNING:
+ spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
+ break;
+ case BR_STATE_BLOCKING:
+ spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
+ break;
+ default:
+ BUG();
+ }
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ u8 state)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ mlxsw_sp_port->stp_state = state;
+ return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
+}
+
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_begin, u16 fid_end, bool set,
+ bool only_uc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 range = fid_end - fid_begin + 1;
+ char *sftr_pl;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+ mlxsw_sp_port->local_port, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+ if (err)
+ goto buffer_out;
+
+ /* Flooding control allows one to decide whether a given port will
+ * flood unicast traffic for which there is no FDB entry.
+ */
+ if (only_uc)
+ goto buffer_out;
+
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+ mlxsw_sp_port->local_port, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+buffer_out:
+ kfree(sftr_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool set)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ u16 vid, last_visited_vid;
+ int err;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
+ true);
+ if (err) {
+ last_visited_vid = vid;
+ goto err_port_flood_set;
+ }
+ }
+
+ return 0;
+
+err_port_flood_set:
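+	/* Roll back flooding configuration for VIDs handled before the error */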
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
+ netdev_err(dev, "Failed to configure unicast flooding\n");
+ return err;
+}
+
+static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ unsigned long brport_flags)
+{
+ unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
+ bool set;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if ((uc_flood ^ brport_flags) & BR_FLOOD) {
+ set = mlxsw_sp_port->uc_flood ? false : true;
+ err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
+ if (err)
+ return err;
+ }
+
+ mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
+ mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
+ mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+
+ return 0;
+}
+
+static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
+{
+ char sfdat_pl[MLXSW_REG_SFDAT_LEN];
+ int err;
+
+ mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
+ if (err)
+ return err;
+ mlxsw_sp->ageing_time = ageing_time;
+ return 0;
+}
+
+static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ unsigned long ageing_clock_t)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
+}
+
+static int mlxsw_sp_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
+ attr->u.ageing_time);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spvid_pl[MLXSW_REG_SPVID_LEN];
+
+ mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ int err;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+
+ if (err)
+ return err;
+
+ set_bit(fid, mlxsw_sp->active_fids);
+ return 0;
+}
+
+static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ clear_bit(fid, mlxsw_sp->active_fids);
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+ fid, fid);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
+{
+ enum mlxsw_reg_svfa_mt mt;
+
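+	/* Ports with VLAN uppers use the per-port {Port, VID} to FID
+	 * mapping, otherwise the global VID to FID mapping is used.
+	 */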
+ if (mlxsw_sp_port->nr_vfids)
+ mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ else
+ mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
+}
+
+static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
+{
+ enum mlxsw_reg_svfa_mt mt;
+
+ if (!mlxsw_sp_port->nr_vfids)
+ return 0;
+
+ mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
+}
+
+static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
+ u16 vid_end)
+{
+ u16 vid;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ err = mlxsw_sp_port_add_vid(dev, 0, vid);
+ if (err)
+ goto err_port_add_vid;
+ }
+ return 0;
+
+err_port_add_vid:
+ for (vid--; vid >= vid_begin; vid--)
+ mlxsw_sp_port_kill_vid(dev, 0, vid);
+ return err;
+}
+
+static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool flag_untagged, bool flag_pvid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ enum mlxsw_reg_svfa_mt mt;
+ u16 vid, vid_e;
+ int err;
+
+	/* In case this is invoked with BRIDGE_FLAGS_SELF and the port is
+	 * not bridged, then packets ingressing through the port with
+	 * the specified VIDs will be directed to the CPU.
+	 */
+ if (!mlxsw_sp_port->bridged)
+ return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ if (!test_bit(vid, mlxsw_sp->active_fids)) {
+ err = mlxsw_sp_fid_create(mlxsw_sp, vid);
+ if (err) {
+ netdev_err(dev, "Failed to create FID=%d\n",
+ vid);
+ return err;
+ }
+
+ /* When creating a FID, we set a VID to FID mapping
+ * regardless of the port's mode.
+ */
+ mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
+ true, vid, vid);
+ if (err) {
+ netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
+ vid);
+ return err;
+ }
+ }
+
+ /* Set FID mapping according to port's mode */
+ err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
+ if (err) {
+ netdev_err(dev, "Failed to map FID=%d", vid);
+ return err;
+ }
+ }
+
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+ true, false);
+ if (err) {
+ netdev_err(dev, "Failed to configure flooding\n");
+ return err;
+ }
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true,
+ flag_untagged);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n",
+ vid, vid_e);
+ return err;
+ }
+ }
+
+ vid = vid_begin;
+ if (flag_pvid && mlxsw_sp_port->pvid != vid) {
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n",
+ vid);
+ return err;
+ }
+ mlxsw_sp_port->pvid = vid;
+ }
+
+	/* Change activity bits only if the HW operation succeeded */
+ for (vid = vid_begin; vid <= vid_end; vid++)
+ set_bit(vid, mlxsw_sp_port->active_vlans);
+
+ return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
+ mlxsw_sp_port->stp_state);
+}
+
+static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+ vlan->vid_begin, vlan->vid_end,
+ untagged_flag, pvid_flag);
+}
+
+static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
+ const char *mac, u16 vid, bool adding,
+ bool dynamic)
+{
+ enum mlxsw_reg_sfd_rec_policy policy;
+ enum mlxsw_reg_sfd_op op;
+ char *sfd_pl;
+ int err;
+
+ if (!vid)
+ vid = mlxsw_sp_port->pvid;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
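+	/* Dynamic entries are subject to ageing, static entries are not */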
+ policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+ op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
+ MLXSW_REG_SFD_OP_WRITE_REMOVE;
+ mlxsw_reg_sfd_pack(sfd_pl, op, 0);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
+ mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mlxsw_sp_port->local_port);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd),
+ sfd_pl);
+ kfree(sfd_pl);
+
+ return err;
+}
+
+static int
+mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+ true, false);
+}
+
+static int mlxsw_sp_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj),
+ trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
+ u16 vid_end)
+{
+ u16 vid;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ err = mlxsw_sp_port_kill_vid(dev, 0, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end, bool init)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ u16 vid, vid_e;
+ int err;
+
+	/* In case this is invoked with BRIDGE_FLAGS_SELF and the port is
+	 * not bridged, then prevent packets ingressing through the
+	 * port with the specified VIDs from being trapped to the CPU.
+	 */
+ if (!init && !mlxsw_sp_port->bridged)
+ return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false,
+ false);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n",
+ vid, vid_e);
+ return err;
+ }
+ }
+
+ if ((mlxsw_sp_port->pvid >= vid_begin) &&
+ (mlxsw_sp_port->pvid <= vid_end)) {
+ /* Default VLAN is always 1 */
+ mlxsw_sp_port->pvid = 1;
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port,
+ mlxsw_sp_port->pvid);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n",
+ vid);
+ return err;
+ }
+ }
+
+ if (init)
+ goto out;
+
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+ false, false);
+ if (err) {
+ netdev_err(dev, "Failed to clear flooding\n");
+ return err;
+ }
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ /* Remove FID mapping in case of Virtual mode */
+ err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
+ if (err) {
+ netdev_err(dev, "Failed to unmap FID=%d", vid);
+ return err;
+ }
+ }
+
+out:
+	/* Change activity bits only if the HW operation succeeded */
+ for (vid = vid_begin; vid <= vid_end; vid++)
+ clear_bit(vid, mlxsw_sp_port->active_vlans);
+
+ return 0;
+}
+
+static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+ vlan->vid_begin, vlan->vid_end, false);
+}
+
+static int
+mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+ false, false);
+}
+
+static int mlxsw_sp_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ char *sfd_pl;
+ char mac[ETH_ALEN];
+ u16 vid;
+ u8 local_port;
+ u8 num_rec;
+ int stored_err = 0;
+ int i;
+ int err;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
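+	/* Keep querying until the device returns less than a full batch of
+	 * records, which marks the end of the dump.
+	 */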
+ do {
+ mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
+ err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
+ MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+
+ /* Even in case of error, we have to run the dump to the end
+ * so the session in firmware is finished.
+ */
+ if (stored_err)
+ continue;
+
+ for (i = 0; i < num_rec; i++) {
+ switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
+ case MLXSW_REG_SFD_REC_TYPE_UNICAST:
+ mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
+ &local_port);
+ if (local_port == mlxsw_sp_port->local_port) {
+ ether_addr_copy(fdb->addr, mac);
+ fdb->ndm_state = NUD_REACHABLE;
+ fdb->vid = vid;
+ err = cb(&fdb->obj);
+ if (err)
+ stored_err = err;
+ }
+ }
+ }
+ } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
+
+out:
+ kfree(sfd_pl);
+ return stored_err ? stored_err : err;
+}
+
+static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb)
+{
+ u16 vid;
+ int err = 0;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ vlan->flags = 0;
+ if (vid == mlxsw_sp_port->pvid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+ vlan->vid_begin = vid;
+ vlan->vid_end = vid;
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static int mlxsw_sp_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj), cb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
+ .switchdev_port_attr_get = mlxsw_sp_port_attr_get,
+ .switchdev_port_attr_set = mlxsw_sp_port_attr_set,
+ .switchdev_port_obj_add = mlxsw_sp_port_obj_add,
+ .switchdev_port_obj_del = mlxsw_sp_port_obj_del,
+ .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
+};
+
+static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl, int rec_index,
+ bool adding)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ char mac[ETH_ALEN];
+ u8 local_port;
+ u16 vid;
+ int err;
+
+ mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
+ return;
+ }
+
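+	/* Entries learned on a port with learning disabled are removed */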
+ err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
+ adding && mlxsw_sp_port->learning, true);
+ if (err) {
+ if (net_ratelimit())
+ netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
+ return;
+ }
+
+ if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
+ struct switchdev_notifier_fdb_info info;
+ unsigned long notifier_type;
+
+ info.addr = mac;
+ info.vid = vid;
+ notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
+ call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
+ &info.info);
+ }
+}
+
+static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl, int rec_index)
+{
+ switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
+ case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
+ mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+ rec_index, true);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
+ mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+ rec_index, false);
+ break;
+ }
+}
+
+static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
+ msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+}
+
+static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ char *sfn_pl;
+ u8 num_rec;
+ int i;
+ int err;
+
+ sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
+ if (!sfn_pl)
+ return;
+
+ mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+
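+	/* Process all pending FDB notification records before re-arming
+	 * the delayed work.
+	 */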
+ do {
+ mlxsw_reg_sfn_pack(sfn_pl);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+ break;
+ }
+ num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+
+ } while (num_rec);
+
+ kfree(sfn_pl);
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+}
+
+static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
+ return err;
+ }
+ INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
+ mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+ return 0;
+}
+
+static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
+}
+
+static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 fid;
+
+ for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
+ mlxsw_sp_fid_destroy(mlxsw_sp, fid);
+}
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fdb_init(mlxsw_sp);
+}
+
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_fdb_fini(mlxsw_sp);
+ mlxsw_sp_fids_fini(mlxsw_sp);
+}
+
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* Allow only untagged packets to ingress and tag them internally
+ * with VID 1.
+ */
+ mlxsw_sp_port->pvid = 1;
+ err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
+ if (err) {
+ netdev_err(dev, "Unable to init VLANs\n");
+ return err;
+ }
+
+ /* Add implicit VLAN interface in the device, so that untagged
+ * packets will be classified to the default vFID.
+ */
+ err = mlxsw_sp_port_add_vid(dev, 0, 1);
+ if (err)
+ netdev_err(dev, "Failed to configure default vFID\n");
+
+ return err;
+}
+
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
+}
+
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 62cbbd1ada8d..d85960cfb694 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -57,13 +57,11 @@ static const char mlxsw_sx_driver_version[] = "1.0";
struct mlxsw_sx_port;
-#define MLXSW_SW_HW_ID_LEN 6
-
struct mlxsw_sx {
struct mlxsw_sx_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
- u8 hw_id[MLXSW_SW_HW_ID_LEN];
+ u8 hw_id[ETH_ALEN];
};
struct mlxsw_sx_port_pcpu_stats {
@@ -868,7 +866,7 @@ static int mlxsw_sx_port_attr_get(struct net_device *dev,
struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
switch (attr->id) {
- case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
break;
@@ -925,7 +923,8 @@ static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
if (!spms_pl)
return -ENOMEM;
- mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
kfree(spms_pl);
return err;
@@ -1148,7 +1147,7 @@ static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
}
status = mlxsw_reg_pude_oper_status_get(pude_pl);
- if (MLXSW_PORT_OPER_STATUS_UP == status) {
+ if (status == MLXSW_PORT_OPER_STATUS_UP) {
netdev_info(mlxsw_sx_port->dev, "link up\n");
netif_carrier_on(mlxsw_sx_port->dev);
} else {
@@ -1178,8 +1177,7 @@ static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
if (err)
return err;
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
- MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
goto err_event_trap_set;
@@ -1212,9 +1210,8 @@ static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
if (unlikely(!mlxsw_sx_port)) {
- if (net_ratelimit())
- dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
- local_port);
+ dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
return;
}
@@ -1316,6 +1313,11 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
if (err)
return err;
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
&mlxsw_sx_rx_listener[i],
@@ -1324,7 +1326,6 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
goto err_rx_listener_register;
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
- MLXSW_REG_HTGT_TRAP_GROUP_RX,
mlxsw_sx_rx_listener[i].trap_id);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
@@ -1339,7 +1340,6 @@ err_rx_trap_set:
err_rx_listener_register:
for (i--; i >= 0; i--) {
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
- MLXSW_REG_HTGT_TRAP_GROUP_RX,
mlxsw_sx_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -1357,7 +1357,6 @@ static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
- MLXSW_REG_HTGT_TRAP_GROUP_RX,
mlxsw_sx_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -1371,25 +1370,15 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
{
char sfgc_pl[MLXSW_REG_SFGC_LEN];
char sgcr_pl[MLXSW_REG_SGCR_LEN];
- char *smid_pl;
char *sftr_pl;
int err;
- /* Due to FW bug, we must configure SMID. */
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
- return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
- if (err)
- return err;
-
/* Configure a flooding table, which includes only CPU port. */
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)
return -ENOMEM;
- mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+ mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
+ MLXSW_PORT_CPU_PORT, true);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
kfree(sftr_pl);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index 06fc46c78a0b..fdf94720ca62 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -38,6 +38,7 @@
#define MLXSW_TXHDR_LEN 0x10
#define MLXSW_TXHDR_VERSION_0 0
+#define MLXSW_TXHDR_VERSION_1 1
enum {
MLXSW_TXHDR_ETH_CTL,