Diffstat (limited to 'drivers/usb/net/usbnet.c')
-rw-r--r--  drivers/usb/net/usbnet.c  44
1 file changed, 34 insertions(+), 10 deletions(-)
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 54183e173a6d..98a522f1e264 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -61,8 +61,11 @@
* let the USB host controller be busy for 5msec or more before an irq
* is required, under load. Jumbograms change the equation.
*/
-#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? 60 : 4)
-#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? 60 : 4)
+#define RX_MAX_QUEUE_MEMORY (60 * 1518)
+#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
+ (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
+#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
+ (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES (5*HZ)
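A quick back-of-the-envelope check of the new sizing (an illustration only; the 4096- and 9018-byte rx_urb_size values below are hypothetical jumbo configurations, not anything set by this patch): at high speed the queue depth now shrinks as the per-URB buffer grows, so roughly sixty standard Ethernet frames' worth of memory stays queued regardless of URB size.

	/* Standalone illustration of the RX_QLEN() arithmetic above for a
	 * high-speed device: larger rx urbs mean fewer of them in flight.
	 */
	#include <stdio.h>

	#define RX_MAX_QUEUE_MEMORY (60 * 1518)

	int main(void)
	{
		unsigned sizes[] = { 1518, 4096, 9018 };	/* bytes per rx urb */
		unsigned i;

		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("rx_urb_size %5u -> rx queue depth %u\n",
			       sizes[i], RX_MAX_QUEUE_MEMORY / sizes[i]);
		return 0;
	}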
@@ -227,13 +230,23 @@ static int usbnet_change_mtu (struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
int ll_mtu = new_mtu + net->hard_header_len;
+ int old_hard_mtu = dev->hard_mtu;
+ int old_rx_urb_size = dev->rx_urb_size;
- if (new_mtu <= 0 || ll_mtu > dev->hard_mtu)
+ if (new_mtu <= 0)
return -EINVAL;
// no second zero-length packet read wanted after mtu-sized packets
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
net->mtu = new_mtu;
+
+ dev->hard_mtu = net->mtu + net->hard_header_len;
+ if (dev->rx_urb_size == old_hard_mtu) {
+ dev->rx_urb_size = dev->hard_mtu;
+ if (dev->rx_urb_size > old_rx_urb_size)
+ usbnet_unlink_rx_urbs(dev);
+ }
+
return 0;
}
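For context on the -EDOM guard kept above, a small worked example (assuming the typical 512-byte high-speed bulk maxpacket and the 14-byte Ethernet hard_header_len, neither of which is spelled out in this hunk): an MTU of 1522 yields a link-level size of 1536, an exact multiple of 512, so every mtu-sized packet would have to be followed by a zero-length packet, and the MTU change is refused.

	/* Worked example of the zero-length-packet check, using assumed
	 * maxpacket and header sizes as noted above.
	 */
	#include <stdio.h>

	int main(void)
	{
		int maxpacket = 512, hard_header_len = 14;
		int mtu;

		for (mtu = 1500; mtu <= 1522; mtu += 11) {
			int ll_mtu = mtu + hard_header_len;
			printf("mtu %4d -> ll_mtu %4d -> %s\n", mtu, ll_mtu,
			       (ll_mtu % maxpacket) ? "accepted" : "-EDOM");
		}
		return 0;
	}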
@@ -412,9 +425,9 @@ static void rx_complete (struct urb *urb, struct pt_regs *regs)
// we get controller i/o faults during khubd disconnect() delays.
// throttle down resubmits, to avoid log floods; just temporarily,
// so we still recover when the fault isn't a khubd delay.
- case -EPROTO: // ehci
- case -ETIMEDOUT: // ohci
- case -EILSEQ: // uhci
+ case -EPROTO:
+ case -ETIME:
+ case -EILSEQ:
dev->stats.rx_errors++;
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
@@ -521,6 +534,17 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
return count;
}
+// Flush all pending rx urbs
+// minidrivers may need to do this when the MTU changes
+
+void usbnet_unlink_rx_urbs(struct usbnet *dev)
+{
+ if (netif_running(dev->net)) {
+ (void) unlink_urbs (dev, &dev->rxq);
+ tasklet_schedule(&dev->bh);
+ }
+}
+EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
/*-------------------------------------------------------------------------*/
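A minimal sketch of how a minidriver might use the newly exported helper when it reconfigures the device for jumbo frames (the foo_* names and the FOO_JUMBO_URB_SIZE constant are hypothetical, not part of this patch):

	/* Hypothetical minidriver path: after switching the hardware into a
	 * jumbo receive mode, grow rx_urb_size and flush the rx queue so the
	 * next submissions use buffers sized for the new frames.
	 */
	static int foo_enable_jumbo(struct usbnet *dev)
	{
		int status;

		status = foo_set_jumbo_mode(dev);	/* device-specific register write */
		if (status < 0)
			return status;

		dev->rx_urb_size = FOO_JUMBO_URB_SIZE;	/* larger than dev->hard_mtu */
		usbnet_unlink_rx_urbs(dev);		/* drop urbs sized for the old mtu */
		return 0;
	}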
@@ -629,7 +653,7 @@ static int usbnet_open (struct net_device *net)
devinfo (dev, "open: enable queueing "
"(rx %d, tx %d) mtu %d %s framing",
- RX_QLEN (dev), TX_QLEN (dev), dev->net->mtu,
+ (int)RX_QLEN (dev), (int)TX_QLEN (dev), dev->net->mtu,
framing);
}
@@ -797,9 +821,9 @@ static void tx_complete (struct urb *urb, struct pt_regs *regs)
// like rx, tx gets controller i/o faults during khubd delays
// and so it uses the same throttling mechanism.
- case -EPROTO: // ehci
- case -ETIMEDOUT: // ohci
- case -EILSEQ: // uhci
+ case -EPROTO:
+ case -ETIME:
+ case -EILSEQ:
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay,
jiffies + THROTTLE_JIFFIES);