commit    aecdc33e111b2c447b622e287c6003726daa1426
author    Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 13:38:27 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 13:38:27 -0700
tree      3e7657eae4b785e1a1fb5dfb225dbae0b2f0cfc6  /drivers/net/team/team.c
parent    a20acf99f75e49271381d65db097c9763060a1e8
parent    a3a6cab5ea10cca64d036851fe0d932448f2fe4f
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:

 1) GRE now works over ipv6, from Dmitry Kozlov.

 2) Make SCTP more network namespace aware, from Eric Biederman.

 3) TEAM driver now works with non-ethernet devices, from Jiri Pirko.

 4) Make openvswitch network namespace aware, from Pravin B Shelar.

 5) IPV6 NAT implementation, from Patrick McHardy.

 6) Server side support for TCP Fast Open, from Jerry Chu and others.

 7) Packet BPF filter supports MOD and XOR, from Eric Dumazet and Daniel
    Borkmann.

 8) Increase the loopback default MTU to 64K, from Eric Dumazet.

 9) Use a per-task rather than per-socket page fragment allocator for
    outgoing networking traffic.  This benefits processes that have very
    many mostly idle sockets, which is quite common.  From Eric Dumazet.

10) Use up to 32K for page fragment allocations, with fallbacks to
    smaller sizes when higher-order page allocations fail.  Benefits are
    a) fewer segments for the driver to process, b) fewer calls to the
    page allocator, and c) less waste of space.  From Eric Dumazet.

11) Allow GRO to be used on GRE tunnels, from Eric Dumazet.

12) VXLAN device driver, one way to handle VLAN issues such as the
    limitation of 4096 VLAN IDs yet still have some level of isolation.
    From Stephen Hemminger.

13) As usual there is a large boatload of driver changes, with the scale
    perhaps tilted towards the wireless side this time around.

Fix up various fairly trivial conflicts, mostly caused by the user
namespace changes.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1012 commits)
  hyperv: Add buffer for extended info after the RNDIS response message.
  hyperv: Report actual status in receive completion packet
  hyperv: Remove extra allocated space for recv_pkt_list elements
  hyperv: Fix page buffer handling in rndis_filter_send_request()
  hyperv: Fix the missing return value in rndis_filter_set_packet_filter()
  hyperv: Fix the max_xfer_size in RNDIS initialization
  vxlan: put UDP socket in correct namespace
  vxlan: Depend on CONFIG_INET
  sfc: Fix the reported priorities of different filter types
  sfc: Remove EFX_FILTER_FLAG_RX_OVERRIDE_IP
  sfc: Fix loopback self-test with separate_tx_channels=1
  sfc: Fix MCDI structure field lookup
  sfc: Add parentheses around use of bitfield macro arguments
  sfc: Fix null function pointer in efx_sriov_channel_type
  vxlan: virtual extensible lan
  igmp: export symbol ip_mc_leave_group
  netlink: add attributes to fdb interface
  tg3: unconditionally select HWMON support when tg3 is enabled.
  Revert "net: ti cpsw ethernet: allow reading phy interface mode from DT"
  gre: fix sparse warning
  ...
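As a quick orientation aid for item 6: a minimal sketch of how a server opts
in to the new server-side TCP Fast Open. This is illustrative userspace code,
not taken from this merge; it assumes the TCP_FASTOPEN socket option added in
this cycle, with error handling omitted, and assumes the host has the server
bit of the net.ipv4.tcp_fastopen sysctl enabled.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN 23	/* value from linux/tcp.h; older libc headers lack it */
#endif

static int tfo_listen(unsigned short port)
{
	struct sockaddr_in sa;
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int qlen = 16;	/* bound on the queue of pending TFO requests */

	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_addr.s_addr = htonl(INADDR_ANY);
	sa.sin_port = htons(port);
	bind(fd, (struct sockaddr *)&sa, sizeof(sa));

	/* Opt this listener in to accepting data-carrying SYNs. */
	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
	listen(fd, 128);
	return fd;
}

Accepted connections then behave normally; data carried in the SYN is simply
available on the first read() from the accepted socket.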
Diffstat (limited to 'drivers/net/team/team.c')
-rw-r--r--  drivers/net/team/team.c | 342
1 file changed, 295 insertions(+), 47 deletions(-)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f8cd61f449a..5c7547c4f80 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -54,29 +54,29 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
}
/*
- * Since the ability to change mac address for open port device is tested in
+ * Since the ability to change device address for open port device is tested in
* team_port_add, this function can be called without control of return value
*/
-static int __set_port_mac(struct net_device *port_dev,
- const unsigned char *dev_addr)
+static int __set_port_dev_addr(struct net_device *port_dev,
+ const unsigned char *dev_addr)
{
struct sockaddr addr;
- memcpy(addr.sa_data, dev_addr, ETH_ALEN);
- addr.sa_family = ARPHRD_ETHER;
+ memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
+ addr.sa_family = port_dev->type;
return dev_set_mac_address(port_dev, &addr);
}
-static int team_port_set_orig_mac(struct team_port *port)
+static int team_port_set_orig_dev_addr(struct team_port *port)
{
- return __set_port_mac(port->dev, port->orig.dev_addr);
+ return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}
-int team_port_set_team_mac(struct team_port *port)
+int team_port_set_team_dev_addr(struct team_port *port)
{
- return __set_port_mac(port->dev, port->team->dev->dev_addr);
+ return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
}
-EXPORT_SYMBOL(team_port_set_team_mac);
+EXPORT_SYMBOL(team_port_set_team_dev_addr);
static void team_refresh_port_linkup(struct team_port *port)
{
@@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
}
+/*************************************
+ * Multiqueue Tx port select override
+ *************************************/
+
+static int team_queue_override_init(struct team *team)
+{
+ struct list_head *listarr;
+ unsigned int queue_cnt = team->dev->num_tx_queues - 1;
+ unsigned int i;
+
+ if (!queue_cnt)
+ return 0;
+ listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
+ if (!listarr)
+ return -ENOMEM;
+ team->qom_lists = listarr;
+ for (i = 0; i < queue_cnt; i++)
+ INIT_LIST_HEAD(listarr++);
+ return 0;
+}
+
+static void team_queue_override_fini(struct team *team)
+{
+ kfree(team->qom_lists);
+}
+
+static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
+{
+ return &team->qom_lists[queue_id - 1];
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct list_head *qom_list;
+ struct team_port *port;
+
+ if (!team->queue_override_enabled || !skb->queue_mapping)
+ return false;
+ qom_list = __team_get_qom_list(team, skb->queue_mapping);
+ list_for_each_entry_rcu(port, qom_list, qom_list) {
+ if (!team_dev_queue_xmit(team, port, skb))
+ return true;
+ }
+ return false;
+}
+
+static void __team_queue_override_port_del(struct team *team,
+ struct team_port *port)
+{
+ list_del_rcu(&port->qom_list);
+ synchronize_rcu();
+ INIT_LIST_HEAD(&port->qom_list);
+}
+
+static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
+ struct team_port *cur)
+{
+ if (port->priority < cur->priority)
+ return true;
+ if (port->priority > cur->priority)
+ return false;
+ if (port->index < cur->index)
+ return true;
+ return false;
+}
+
+static void __team_queue_override_port_add(struct team *team,
+ struct team_port *port)
+{
+ struct team_port *cur;
+ struct list_head *qom_list;
+ struct list_head *node;
+
+ if (!port->queue_id || !team_port_enabled(port))
+ return;
+
+ qom_list = __team_get_qom_list(team, port->queue_id);
+ node = qom_list;
+ list_for_each_entry(cur, qom_list, qom_list) {
+ if (team_queue_override_port_has_gt_prio_than(port, cur))
+ break;
+ node = &cur->qom_list;
+ }
+ list_add_tail_rcu(&port->qom_list, node);
+}
+
+static void __team_queue_override_enabled_check(struct team *team)
+{
+ struct team_port *port;
+ bool enabled = false;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ if (!list_empty(&port->qom_list)) {
+ enabled = true;
+ break;
+ }
+ }
+ if (enabled == team->queue_override_enabled)
+ return;
+ netdev_dbg(team->dev, "%s queue override\n",
+ enabled ? "Enabling" : "Disabling");
+ team->queue_override_enabled = enabled;
+}
+
+static void team_queue_override_port_refresh(struct team *team,
+ struct team_port *port)
+{
+ __team_queue_override_port_del(team, port);
+ __team_queue_override_port_add(team, port);
+ __team_queue_override_enabled_check(team);
+}
+
+
/****************
* Port handling
****************/
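Before the port-handling hunks: the queue-override code above keeps each
per-queue list ordered so that the port tried first on transmit is the one
team_queue_override_port_has_gt_prio_than() favors, i.e. the smaller
priority value, with ties broken by the smaller port index. A simplified,
self-contained userspace sketch of that ordering rule follows (hypothetical
struct and a plain singly linked list instead of the kernel's RCU-protected
list_head; not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>

struct port {
	int priority;
	int index;
	struct port *next;
};

/* Same decision as team_queue_override_port_has_gt_prio_than() above. */
static int outranks(const struct port *a, const struct port *b)
{
	if (a->priority != b->priority)
		return a->priority < b->priority;
	return a->index < b->index;
}

/* Insert p before the first entry it outranks, keeping the list sorted. */
static void insert_sorted(struct port **head, struct port *p)
{
	while (*head && !outranks(p, *head))
		head = &(*head)->next;
	p->next = *head;
	*head = p;
}

int main(void)
{
	struct port a = { 10, 0 }, b = { 10, 1 }, c = { 5, 2 };
	struct port *head = NULL;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	insert_sorted(&head, &c);
	for (struct port *p = head; p; p = p->next)
		printf("prio=%d index=%d\n", p->priority, p->index);
	return 0;	/* prints c (5,2), then a (10,0), then b (10,1) */
}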
@@ -688,6 +804,7 @@ static void team_port_enable(struct team *team,
hlist_add_head_rcu(&port->hlist,
team_port_index_hash(team, port->index));
team_adjust_ops(team);
+ team_queue_override_port_refresh(team, port);
if (team->ops.port_enabled)
team->ops.port_enabled(team, port);
}
@@ -716,6 +833,7 @@ static void team_port_disable(struct team *team,
hlist_del_rcu(&port->hlist);
__reconstruct_port_hlist(team, port->index);
port->index = -1;
+ team_queue_override_port_refresh(team, port);
__team_adjust_ops(team, team->en_port_count - 1);
/*
* Wait until readers see adjusted ops. This ensures that
@@ -849,6 +967,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
#endif
static void __team_port_change_port_added(struct team_port *port, bool linkup);
+static int team_dev_type_check_change(struct net_device *dev,
+ struct net_device *port_dev);
static int team_port_add(struct team *team, struct net_device *port_dev)
{
@@ -857,9 +977,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
char *portname = port_dev->name;
int err;
- if (port_dev->flags & IFF_LOOPBACK ||
- port_dev->type != ARPHRD_ETHER) {
- netdev_err(dev, "Device %s is of an unsupported type\n",
+ if (port_dev->flags & IFF_LOOPBACK) {
+ netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
portname);
return -EINVAL;
}
@@ -870,6 +989,17 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EBUSY;
}
+ if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+ vlan_uses_dev(dev)) {
+ netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
+ portname);
+ return -EPERM;
+ }
+
+ err = team_dev_type_check_change(dev, port_dev);
+ if (err)
+ return err;
+
if (port_dev->flags & IFF_UP) {
netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
portname);
@@ -883,6 +1013,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
port->dev = port_dev;
port->team = team;
+ INIT_LIST_HEAD(&port->qom_list);
port->orig.mtu = port_dev->mtu;
err = dev_set_mtu(port_dev, dev->mtu);
@@ -891,7 +1022,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_set_mtu;
}
- memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+ memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
err = team_port_enter(team, port);
if (err) {
@@ -972,7 +1103,7 @@ err_vids_add:
err_dev_open:
team_port_leave(team, port);
- team_port_set_orig_mac(port);
+ team_port_set_orig_dev_addr(port);
err_port_enter:
dev_set_mtu(port_dev, port->orig.mtu);
@@ -1010,7 +1141,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
vlan_vids_del_by_dev(port_dev, dev);
dev_close(port_dev);
team_port_leave(team, port);
- team_port_set_orig_mac(port);
+ team_port_set_orig_dev_addr(port);
dev_set_mtu(port_dev, port->orig.mtu);
synchronize_rcu();
kfree(port);
@@ -1095,6 +1226,49 @@ static int team_user_linkup_en_option_set(struct team *team,
return 0;
}
+static int team_priority_option_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+
+ ctx->data.s32_val = port->priority;
+ return 0;
+}
+
+static int team_priority_option_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+
+ port->priority = ctx->data.s32_val;
+ team_queue_override_port_refresh(team, port);
+ return 0;
+}
+
+static int team_queue_id_option_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+
+ ctx->data.u32_val = port->queue_id;
+ return 0;
+}
+
+static int team_queue_id_option_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+
+ if (port->queue_id == ctx->data.u32_val)
+ return 0;
+ if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+ return -EINVAL;
+ port->queue_id = ctx->data.u32_val;
+ team_queue_override_port_refresh(team, port);
+ return 0;
+}
+
+
static const struct team_option team_options[] = {
{
.name = "mode",
@@ -1123,6 +1297,20 @@ static const struct team_option team_options[] = {
.getter = team_user_linkup_en_option_get,
.setter = team_user_linkup_en_option_set,
},
+ {
+ .name = "priority",
+ .type = TEAM_OPTION_TYPE_S32,
+ .per_port = true,
+ .getter = team_priority_option_get,
+ .setter = team_priority_option_set,
+ },
+ {
+ .name = "queue_id",
+ .type = TEAM_OPTION_TYPE_U32,
+ .per_port = true,
+ .getter = team_queue_id_option_get,
+ .setter = team_queue_id_option_set,
+ },
};
static struct lock_class_key team_netdev_xmit_lock_key;
@@ -1158,6 +1346,9 @@ static int team_init(struct net_device *dev)
for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
INIT_HLIST_HEAD(&team->en_port_hlist[i]);
INIT_LIST_HEAD(&team->port_list);
+ err = team_queue_override_init(team);
+ if (err)
+ goto err_team_queue_override_init;
team_adjust_ops(team);
@@ -1173,6 +1364,8 @@ static int team_init(struct net_device *dev)
return 0;
err_options_register:
+ team_queue_override_fini(team);
+err_team_queue_override_init:
free_percpu(team->pcpu_stats);
return err;
@@ -1190,6 +1383,7 @@ static void team_uninit(struct net_device *dev)
__team_change_mode(team, NULL); /* cleanup */
__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+ team_queue_override_fini(team);
mutex_unlock(&team->lock);
}
@@ -1219,10 +1413,12 @@ static int team_close(struct net_device *dev)
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct team *team = netdev_priv(dev);
- bool tx_success = false;
+ bool tx_success;
unsigned int len = skb->len;
- tx_success = team->ops.transmit(team, skb);
+ tx_success = team_queue_override_transmit(team, skb);
+ if (!tx_success)
+ tx_success = team->ops.transmit(team, skb);
if (tx_success) {
struct team_pcpu_stats *pcpu_stats;
@@ -1296,17 +1492,18 @@ static void team_set_rx_mode(struct net_device *dev)
static int team_set_mac_address(struct net_device *dev, void *p)
{
+ struct sockaddr *addr = p;
struct team *team = netdev_priv(dev);
struct team_port *port;
- int err;
- err = eth_mac_addr(dev, p);
- if (err)
- return err;
+ if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list)
- if (team->ops.port_change_mac)
- team->ops.port_change_mac(team, port);
+ if (team->ops.port_change_dev_addr)
+ team->ops.port_change_dev_addr(team, port);
rcu_read_unlock();
return 0;
}
@@ -1537,6 +1734,45 @@ static const struct net_device_ops team_netdev_ops = {
* rt netlink interface
***********************/
+static void team_setup_by_port(struct net_device *dev,
+ struct net_device *port_dev)
+{
+ dev->header_ops = port_dev->header_ops;
+ dev->type = port_dev->type;
+ dev->hard_header_len = port_dev->hard_header_len;
+ dev->addr_len = port_dev->addr_len;
+ dev->mtu = port_dev->mtu;
+ memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
+ memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+}
+
+static int team_dev_type_check_change(struct net_device *dev,
+ struct net_device *port_dev)
+{
+ struct team *team = netdev_priv(dev);
+ char *portname = port_dev->name;
+ int err;
+
+ if (dev->type == port_dev->type)
+ return 0;
+ if (!list_empty(&team->port_list)) {
+ netdev_err(dev, "Device %s is of different type\n", portname);
+ return -EBUSY;
+ }
+ err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
+ err = notifier_to_errno(err);
+ if (err) {
+ netdev_err(dev, "Refused to change device type\n");
+ return err;
+ }
+ dev_uc_flush(dev);
+ dev_mc_flush(dev);
+ team_setup_by_port(dev, port_dev);
+ call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
+ return 0;
+}
+
static void team_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -1651,7 +1887,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
if (!msg)
return -ENOMEM;
- hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+ hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
&team_nl_family, 0, TEAM_CMD_NOOP);
if (!hdr) {
err = -EMSGSIZE;
@@ -1660,7 +1896,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
genlmsg_end(msg, hdr);
- return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
err_msg_put:
nlmsg_free(msg);
@@ -1717,7 +1953,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
if (err < 0)
goto err_fill;
- err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+ err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
return err;
err_fill:
@@ -1726,11 +1962,11 @@ err_fill:
}
typedef int team_nl_send_func_t(struct sk_buff *skb,
- struct team *team, u32 pid);
+ struct team *team, u32 portid);
-static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
+static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
- return genlmsg_unicast(dev_net(team->dev), skb, pid);
+ return genlmsg_unicast(dev_net(team->dev), skb, portid);
}
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
@@ -1790,6 +2026,12 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
goto nest_cancel;
break;
+ case TEAM_OPTION_TYPE_S32:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
+ goto nest_cancel;
+ if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
+ goto nest_cancel;
+ break;
default:
BUG();
}
@@ -1809,13 +2051,13 @@ nest_cancel:
}
static int __send_and_alloc_skb(struct sk_buff **pskb,
- struct team *team, u32 pid,
+ struct team *team, u32 portid,
team_nl_send_func_t *send_func)
{
int err;
if (*pskb) {
- err = send_func(*pskb, team, pid);
+ err = send_func(*pskb, team, portid);
if (err)
return err;
}
@@ -1825,7 +2067,7 @@ static int __send_and_alloc_skb(struct sk_buff **pskb,
return 0;
}
-static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
+static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
int flags, team_nl_send_func_t *send_func,
struct list_head *sel_opt_inst_list)
{
@@ -1842,11 +2084,11 @@ static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
struct team_option_inst, tmp_list);
start_again:
- err = __send_and_alloc_skb(&skb, team, pid, send_func);
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
return err;
- hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
+ hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET);
if (!hdr)
return -EMSGSIZE;
@@ -1879,15 +2121,15 @@ start_again:
goto start_again;
send_done:
- nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
if (!nlh) {
- err = __send_and_alloc_skb(&skb, team, pid, send_func);
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
goto errout;
goto send_done;
}
- return send_func(skb, team, pid);
+ return send_func(skb, team, portid);
nla_put_failure:
err = -EMSGSIZE;
@@ -1910,7 +2152,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
list_for_each_entry(opt_inst, &team->option_inst_list, list)
list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
- err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
+ err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
NLM_F_ACK, team_nl_send_unicast,
&sel_opt_inst_list);
@@ -1978,6 +2220,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
case NLA_FLAG:
opt_type = TEAM_OPTION_TYPE_BOOL;
break;
+ case NLA_S32:
+ opt_type = TEAM_OPTION_TYPE_S32;
+ break;
default:
goto team_put;
}
@@ -2034,6 +2279,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
case TEAM_OPTION_TYPE_BOOL:
ctx.data.bool_val = attr_data ? true : false;
break;
+ case TEAM_OPTION_TYPE_S32:
+ ctx.data.s32_val = nla_get_s32(attr_data);
+ break;
default:
BUG();
}
@@ -2058,7 +2306,7 @@ team_put:
}
static int team_nl_fill_port_list_get(struct sk_buff *skb,
- u32 pid, u32 seq, int flags,
+ u32 portid, u32 seq, int flags,
struct team *team,
bool fillall)
{
@@ -2066,7 +2314,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
void *hdr;
struct team_port *port;
- hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags,
TEAM_CMD_PORT_LIST_GET);
if (!hdr)
return -EMSGSIZE;
@@ -2115,7 +2363,7 @@ static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
struct genl_info *info, int flags,
struct team *team)
{
- return team_nl_fill_port_list_get(skb, info->snd_pid,
+ return team_nl_fill_port_list_get(skb, info->snd_portid,
info->snd_seq, NLM_F_ACK,
team, true);
}
@@ -2168,7 +2416,7 @@ static struct genl_multicast_group team_change_event_mcgrp = {
};
static int team_nl_send_multicast(struct sk_buff *skb,
- struct team *team, u32 pid)
+ struct team *team, u32 portid)
{
return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
team_change_event_mcgrp.id, GFP_KERNEL);
@@ -2246,7 +2494,7 @@ static void __team_options_change_check(struct team *team)
list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
}
err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
- if (err)
+ if (err && err != -ESRCH)
netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
err);
}
@@ -2275,9 +2523,9 @@ static void __team_port_change_send(struct team_port *port, bool linkup)
send_event:
err = team_nl_send_event_port_list_get(port->team);
- if (err)
- netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
- port->dev->name);
+ if (err && err != -ESRCH)
+ netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
+ port->dev->name, err);
}