author     Linus Torvalds <torvalds@linux-foundation.org>  2014-07-21 22:46:01 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-07-21 22:46:01 -0700
commit     15ba2236f3556fc01b9ca91394465152b5ea74b6 (patch)
tree       d272d8227f618aab5e63075a8aa86932f3c89e50 /drivers/net
parent     89faa06ec4229b27e339891df69b4d92f29ab899 (diff)
parent     850717ef00d8a224cf1aaffc9c636ea67e01cce2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Null termination fix in dns_resolver got the pointer dereferencing
    wrong, fix from Ben Hutchings.

 2) ip_options_compile() has a benign but real buffer overflow when
    parsing options.  From Eric Dumazet.

 3) Table updates can crash in netfilter's nftables if none of the state
    flags indicate an actual change, from Pablo Neira Ayuso.

 4) Fix race in nf_tables dumping, also from Pablo.

 5) GRE-GRO support broke the forwarding path because the segmentation
    state was not fully initialized in these paths, from Jerry Chu.

 6) sunvnet driver leaks objects and potentially crashes on module
    unload, from Sowmini Varadhan.

 7) We can accidentally generate the same handle for several u32
    classifier filters, fix from Cong Wang.

 8) Several edge case bug fixes in fragment handling in xen-netback,
    from Zoltan Kiss.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (21 commits)
  ipv4: fix buffer overflow in ip_options_compile()
  batman-adv: fix TT VLAN inconsistency on VLAN re-add
  batman-adv: drop QinQ claim frames in bridge loop avoidance
  dns_resolver: Null-terminate the right string
  xen-netback: Fix pointer incrementation to avoid incorrect logging
  xen-netback: Fix releasing header slot on error path
  xen-netback: Fix releasing frag_list skbs in error path
  xen-netback: Fix handling frag_list on grant op error path
  net_sched: avoid generating same handle for u32 filters
  net: huawei_cdc_ncm: add "subclass 3" devices
  net: qmi_wwan: add two Sierra Wireless/Netgear devices
  wan/x25_asy: integer overflow in x25_asy_change_mtu()
  net: ppp: fix creating PPP pass and active filters
  net/mlx4_en: cq->irq_desc wasn't set in legacy EQ's
  sunvnet: clean up objects created in vnet_new() on vnet_exit()
  r8169: Enable RX_MULTI_EN for RTL_GIGA_MAC_VER_40
  net-gre-gro: Fix a bug that breaks the forwarding path
  netfilter: nf_tables: 64bit stats need some extra synchronization
  netfilter: nf_tables: set NLM_F_DUMP_INTR if netlink dumping is stale
  netfilter: nf_tables: safe RCU iteration on list when dumping
  ...
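
The dns_resolver item above ("Null-terminate the right string") comes down to C operator precedence: *p[i] parses as *(p[i]), not (*p)[i], so a terminator written through a char ** output parameter can land in the wrong object. A minimal user-space sketch of that pitfall follows; the names are illustrative, not the kernel code.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative only -- not the dns_resolver code.  *out[len] parses as
     * *(out[len]), so a terminator written that way touches the wrong
     * object; (*out)[len] indexes the freshly allocated buffer.
     */
    static void copy_name(char **out, const char *src, size_t len)
    {
        *out = malloc(len + 1);
        if (!*out)
            return;
        memcpy(*out, src, len);
        (*out)[len] = '\0';     /* correct; *out[len] = '\0' would not be */
    }

    int main(void)
    {
        char *name = NULL;

        copy_name(&name, "example.org", 11);
        printf("%s\n", name);
        free(name);
        return 0;
    }
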
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c    7
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c           2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c            20
-rw-r--r--  drivers/net/ppp/ppp_generic.c                 22
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c               3
-rw-r--r--  drivers/net/usb/qmi_wwan.c                     2
-rw-r--r--  drivers/net/wan/x25_asy.c                      6
-rw-r--r--  drivers/net/xen-netback/netback.c             86
8 files changed, 114 insertions, 34 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 14c00048bbec..82322b1c8411 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -129,14 +129,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
name);
}
- cq->irq_desc =
- irq_to_desc(mlx4_eq_get_irq(mdev->dev,
- cq->vector));
}
} else {
cq->vector = (cq->ring + 1 + priv->port) %
mdev->dev->caps.num_comp_vectors;
}
+
+ cq->irq_desc =
+ irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+ cq->vector));
} else {
/* For TX we use the same irq per
ring we assigned for the RX */
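
A simplified, hypothetical model of the mlx4 change above: both branches pick a valid vector, and the irq-descriptor lookup now runs after the if/else, so the legacy-EQ fallback no longer leaves cq->irq_desc unset. The types and helper below are stand-ins, not the driver's.

    #include <stdio.h>
    #include <stdbool.h>

    struct cq { int vector; int irq_desc; };

    static int vector_to_desc(int vector) { return 100 + vector; }  /* stub lookup */

    static void activate_cq(struct cq *cq, bool msix)
    {
        cq->vector = msix ? 3 : 0;                  /* per-ring vector or shared legacy one */
        cq->irq_desc = vector_to_desc(cq->vector);  /* now set on both paths */
    }

    int main(void)
    {
        struct cq cq = { 0, 0 };
        activate_cq(&cq, false);
        printf("legacy path: vector=%d irq_desc=%d\n", cq.vector, cq.irq_desc);
        return 0;
    }
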
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 06bdc31a828d..61623e9af574 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4240,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1c24a8f368bd..fd411d6e19a2 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
return vp;
}
+static void vnet_cleanup(void)
+{
+ struct vnet *vp;
+ struct net_device *dev;
+
+ mutex_lock(&vnet_list_mutex);
+ while (!list_empty(&vnet_list)) {
+ vp = list_first_entry(&vnet_list, struct vnet, list);
+ list_del(&vp->list);
+ dev = vp->dev;
+ /* vio_unregister_driver() should have cleaned up port_list */
+ BUG_ON(!list_empty(&vp->port_list));
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ mutex_unlock(&vnet_list_mutex);
+}
+
static const char *local_mac_prop = "local-mac-address";
static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
kfree(port);
- unregister_netdev(vp->dev);
}
return 0;
}
@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
static void __exit vnet_exit(void)
{
vio_unregister_driver(&vnet_port_driver);
+ vnet_cleanup();
}
module_init(vnet_init);
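
A minimal user-space sketch of the pattern the sunvnet patch adopts: devices created while the driver is loaded are tracked on a global list, and the exit path drains that list so each one is unregistered and freed exactly once. The real driver uses list_head and vnet_list_mutex; the structures below are illustrative.

    #include <stdio.h>
    #include <stdlib.h>

    struct vnet_like {
        struct vnet_like *next;
        int id;
    };

    static struct vnet_like *vnet_list;     /* head of all created devices */

    static struct vnet_like *vnet_new(int id)
    {
        struct vnet_like *vp = malloc(sizeof(*vp));
        if (!vp)
            return NULL;
        vp->id = id;
        vp->next = vnet_list;               /* track it so exit can find it */
        vnet_list = vp;
        return vp;
    }

    static void vnet_cleanup(void)
    {
        while (vnet_list) {
            struct vnet_like *vp = vnet_list;
            vnet_list = vp->next;
            printf("unregister and free vnet %d\n", vp->id);  /* unregister_netdev + free_netdev */
            free(vp);
        }
    }

    int main(void)
    {
        vnet_new(0);
        vnet_new(1);
        vnet_cleanup();     /* what vnet_exit() now does after unregistering the vio driver */
        return 0;
    }
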
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e2f20f807de8..d5b77ef3a210 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -757,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
};
ppp_lock(ppp);
- if (ppp->pass_filter)
+ if (ppp->pass_filter) {
sk_unattached_filter_destroy(ppp->pass_filter);
- err = sk_unattached_filter_create(&ppp->pass_filter,
- &fprog);
+ ppp->pass_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&ppp->pass_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
ppp_unlock(ppp);
}
@@ -778,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
};
ppp_lock(ppp);
- if (ppp->active_filter)
+ if (ppp->active_filter) {
sk_unattached_filter_destroy(ppp->active_filter);
- err = sk_unattached_filter_create(&ppp->active_filter,
- &fprog);
+ ppp->active_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&ppp->active_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
ppp_unlock(ppp);
}
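
A small illustrative sketch of the filter-update pattern introduced above: destroy the old program and clear the pointer first, then build a replacement only when the caller actually supplied instructions, so an empty program removes the filter instead of failing or leaving a dangling pointer. The types and helpers below are hypothetical stand-ins for the sk_unattached_filter API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct prog { size_t len; int *insns; };

    static void prog_destroy(struct prog *p)
    {
        free(p->insns);
        free(p);
    }

    static int prog_update(struct prog **slot, const int *insns, size_t len)
    {
        if (*slot) {
            prog_destroy(*slot);
            *slot = NULL;               /* never leave a stale pointer behind */
        }
        if (len == 0)
            return 0;                   /* empty program == "no filter" */

        struct prog *p = malloc(sizeof(*p));
        if (!p)
            return -1;
        p->insns = malloc(len * sizeof(*insns));
        if (!p->insns) {
            free(p);
            return -1;
        }
        memcpy(p->insns, insns, len * sizeof(*insns));
        p->len = len;
        *slot = p;
        return 0;
    }

    int main(void)
    {
        struct prog *pass_filter = NULL;
        int code[] = { 1, 2, 3 };

        prog_update(&pass_filter, code, 3);     /* install */
        prog_update(&pass_filter, NULL, 0);     /* remove  */
        printf("filter installed: %s\n", pass_filter ? "yes" : "no");
        return 0;
    }
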
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 5d95a13dbe2a..735f7dadb9a0 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
.driver_info = (unsigned long)&huawei_cdc_ncm_info,
},
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
/* Terminating entry */
{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index c4638c67f6b9..22756db53dca 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -757,6 +758,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
{QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
+ {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5895f1978691..fa9fdfa128c1 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
{
struct x25_asy *sl = netdev_priv(dev);
unsigned char *xbuff, *rbuff;
- int len = 2 * newmtu;
+ int len;
+ if (newmtu > 65534)
+ return -EINVAL;
+
+ len = 2 * newmtu;
xbuff = kmalloc(len + 4, GFP_ATOMIC);
rbuff = kmalloc(len + 4, GFP_ATOMIC);
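
The arithmetic behind the x25_asy check, as a user-space sketch: len is an int computed as 2 * newmtu (plus 4 bytes of slack), so an absurdly large MTU from userspace can overflow the multiplication and the driver would allocate far less than it later assumes. Bounding newmtu at 65534 first keeps the result well inside int range. The function below is a simplified stand-in, not the driver code.

    #include <stdio.h>

    static int change_mtu(int newmtu)
    {
        if (newmtu > 65534 || newmtu < 1)
            return -1;              /* -EINVAL in the driver */

        int len = 2 * newmtu;       /* at most 131068, safely below INT_MAX */
        printf("allocating %d + 4 bytes for each buffer\n", len);
        return 0;
    }

    int main(void)
    {
        change_mtu(576);            /* typical X.25 MTU: accepted */
        change_mtu(0x40000000);     /* would overflow 2 * newmtu: rejected */
        return 0;
    }
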
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1844a47636b6..c65b636bcab9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ /* This always points to the shinfo of the skb being checked, which
+ * could be either the first or the one on the frag_list
+ */
struct skb_shared_info *shinfo = skb_shinfo(skb);
+ /* If this is non-NULL, we are currently checking the frag_list skb, and
+ * this points to the shinfo of the first one
+ */
+ struct skb_shared_info *first_shinfo = NULL;
int nr_frags = shinfo->nr_frags;
+ const bool sharedslot = nr_frags &&
+ frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
int i, err;
- struct sk_buff *first_skb = NULL;
/* Check status of header. */
err = (*gopp_copy)->status;
- (*gopp_copy)++;
if (unlikely(err)) {
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
- xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+ /* The first frag might still have this slot mapped */
+ if (!sharedslot)
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_ERROR);
}
+ (*gopp_copy)++;
check_frags:
for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
pending_idx,
gop_map->handle);
/* Had a previous error? Invalidate this fragment. */
- if (unlikely(err))
+ if (unlikely(err)) {
xenvif_idx_unmap(queue, pending_idx);
+ /* If the mapping of the first frag was OK, but
+ * the header's copy failed, and they are
+ * sharing a slot, send an error
+ */
+ if (i == 0 && sharedslot)
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_ERROR);
+ else
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
continue;
}
@@ -1075,42 +1097,53 @@ check_frags:
gop_map->status,
pending_idx,
gop_map->ref);
+
xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
- /* First error: invalidate preceding fragments. */
+
+ /* First error: if the header haven't shared a slot with the
+ * first frag, release it as well.
+ */
+ if (!sharedslot)
+ xenvif_idx_release(queue,
+ XENVIF_TX_CB(skb)->pending_idx,
+ XEN_NETIF_RSP_OKAY);
+
+ /* Invalidate preceding fragments of this skb. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xenvif_idx_unmap(queue, pending_idx);
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
+
+ /* And if we found the error while checking the frag_list, unmap
+ * the first skb's frags
+ */
+ if (first_shinfo) {
+ for (j = 0; j < first_shinfo->nr_frags; j++) {
+ pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+ xenvif_idx_unmap(queue, pending_idx);
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
}
/* Remember the error: invalidate all subsequent fragments. */
err = newerr;
}
- if (skb_has_frag_list(skb)) {
- first_skb = skb;
- skb = shinfo->frag_list;
- shinfo = skb_shinfo(skb);
+ if (skb_has_frag_list(skb) && !first_shinfo) {
+ first_shinfo = skb_shinfo(skb);
+ shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
nr_frags = shinfo->nr_frags;
goto check_frags;
}
- /* There was a mapping error in the frag_list skb. We have to unmap
- * the first skb's frags
- */
- if (first_skb && err) {
- int j;
- shinfo = skb_shinfo(first_skb);
- for (j = 0; j < shinfo->nr_frags; j++) {
- pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_unmap(queue, pending_idx);
- }
- }
-
*gopp_map = gop_map;
return err;
}
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
/* Check the remap error code. */
if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+ /* If there was an error, xenvif_tx_check_gop is
+ * expected to release all the frags which were mapped,
+ * so kfree_skb shouldn't do it again
+ */
skb_shinfo(skb)->nr_frags = 0;
+ if (skb_has_frag_list(skb)) {
+ struct sk_buff *nskb =
+ skb_shinfo(skb)->frag_list;
+ skb_shinfo(nskb)->nr_frags = 0;
+ }
kfree_skb(skb);
continue;
}
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
tx_unmap_op.status);
BUG();
}
-
- xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
}
static inline int rx_work_todo(struct xenvif_queue *queue)
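
To summarize the control flow that the comments in the xen-netback patch above describe, here is a heavily simplified, hypothetical model of the frag checking in xenvif_tx_check_gop(): the frags of the first skb are checked, the loop is re-entered once for the frag_list skb, and first_shinfo (here just "first") remembers the original skb so its already-mapped frags can be released when an error is only discovered on the chained skb. Grant-table operations are replaced by a plain status array.

    #include <stdio.h>

    struct shinfo_like { int nr_frags; int status[4]; };

    static int check_gop(struct shinfo_like *skb, struct shinfo_like *frag_list)
    {
        struct shinfo_like *shinfo = skb;
        struct shinfo_like *first = NULL;       /* non-NULL while checking frag_list */
        int err = 0;

    check_frags:
        for (int i = 0; i < shinfo->nr_frags; i++) {
            if (shinfo->status[i] == 0)
                continue;                       /* this frag mapped fine */
            if (!err && first) {
                /* first error found on the frag_list skb: the first skb's
                 * frags were all OK, release them here so the caller never
                 * touches them again. */
                for (int j = 0; j < first->nr_frags; j++)
                    printf("release first-skb frag %d\n", j);
            }
            err = shinfo->status[i];            /* remember the error */
        }

        if (frag_list && !first) {
            first = skb;                        /* remember the first shinfo */
            shinfo = frag_list;                 /* and check the chained skb */
            goto check_frags;
        }
        return err;
    }

    int main(void)
    {
        struct shinfo_like skb  = { .nr_frags = 2, .status = { 0, 0 } };
        struct shinfo_like list = { .nr_frags = 1, .status = { -1 } };

        printf("err = %d\n", check_gop(&skb, &list));
        return 0;
    }
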