/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

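/* A vif is schedulable only while its net device is administratively up
 * and the carrier is on, i.e. the frontend is connected.
 */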
int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

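/* TX event channel handler: the frontend has produced new TX requests.
 * The actual work is deferred to the NAPI poll routine below.
 */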
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
		napi_schedule(&vif->napi);

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif *vif = container_of(napi, struct xenvif, napi);
	int work_done;

	work_done = xenvif_tx_action(vif, budget);

	if (work_done < budget) {
		int more_to_do = 0;
		unsigned long flags;

		/* It is necessary to disable IRQs before calling
		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
		 * lose an event from the frontend.
		 *
		 * Consider:
		 *   RING_HAS_UNCONSUMED_REQUESTS
		 *   <frontend generates event to trigger napi_schedule>
		 *   __napi_complete
		 *
		 * This handler is still in scheduled state, so the
		 * event has no effect at all. After __napi_complete
		 * this handler is descheduled and cannot get
		 * scheduled again. We lose the event in this case and
		 * the ring will be completely stalled.
		 */

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
		if (!(more_to_do &&
		      xenvif_tx_pending_slots_available(vif)))
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	return work_done;
}

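/* RX event channel handler: wake the kthread that pushes queued skbs
 * into the frontend's RX ring.
 */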
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	xenvif_kick_thread(vif);

	return IRQ_HANDLED;
}

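/* Combined handler, used when the frontend has not negotiated
 * feature-split-event-channels and one event channel carries both TX
 * and RX notifications.
 */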
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

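/* "Transmit" from dom0's point of view is receive from the guest's:
 * skbs handed to us by the stack are queued on vif->rx_queue and the
 * guest-RX kthread copies them into the frontend's RX ring.
 */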
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	int min_slots_needed;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if vif is not ready */
	if (vif->task == NULL ||
	    vif->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		min_slots_needed++;

	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
		xenvif_stop_queue(vif);

	skb_queue_tail(&vif->rx_queue, skb);
	xenvif_kick_thread(vif);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	return &vif->dev->stats;
}

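/* Bring the datapath up or down: NAPI for guest TX, the (possibly
 * split) event channel IRQs and, on the way down, the TX credit timer.
 */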
static void xenvif_up(struct xenvif *vif)
{
	napi_enable(&vif->napi);
	enable_irq(vif->tx_irq);
	if (vif->tx_irq != vif->rx_irq)
		enable_irq(vif->rx_irq);
	xenvif_check_rx_xenvif(vif);
}

static void xenvif_down(struct xenvif *vif)
{
	napi_disable(&vif->napi);
	disable_irq(vif->tx_irq);
	if (vif->tx_irq != vif->rx_irq)
		disable_irq(vif->rx_irq);
	del_timer_sync(&vif->credit_timeout);
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (netif_carrier_ok(dev))
		xenvif_up(vif);
	netif_start_queue(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (netif_carrier_ok(dev))
		xenvif_down(vif);
	netif_stop_queue(dev);
	return 0;
}

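/* With SG the vif can carry frames up to the 64KiB maximum, minus room
 * for a VLAN-tagged Ethernet header; without SG the MTU is capped at
 * the standard Ethernet payload size.
 */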
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif, tx_zerocopy_fail)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *vif = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
		data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_start_xmit = xenvif_start_xmit,
	.ndo_get_stats = xenvif_get_stats,
	.ndo_open = xenvif_open,
	.ndo_stop = xenvif_close,
	.ndo_change_mtu = xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

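/* Allocate and register the vif<domid>.<handle> net device. The device
 * starts with the carrier off; it is turned on once the frontend
 * connects in xenvif_connect().
 */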
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};
	int i;

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
				     MAX_GRANT_COPY_OPS);
	if (vif->grant_copy_op == NULL) {
		pr_warn("Could not allocate grant copy space for %s\n", name);
		free_netdev(dev);
		return ERR_PTR(-ENOMEM);
	}

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;

	vif->credit_bytes = vif->remaining_credit = ~0UL;
	vif->credit_usec = 0UL;
	init_timer(&vif->credit_timeout);
	vif->credit_window_start = get_jiffies_64();

	dev->netdev_ops = &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	skb_queue_head_init(&vif->rx_queue);
	skb_queue_head_init(&vif->tx_queue);

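	/* The pending ring starts as a free list of all pending-request
	 * indices; slots are consumed as guest TX pages are mapped and
	 * recycled once the dealloc thread has unmapped them.
	 */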
	vif->pending_cons = 0;
	vif->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; i++)
		vif->pending_ring[i] = i;
	spin_lock_init(&vif->callback_lock);
	spin_lock_init(&vif->response_lock);
	/* If ballooning is disabled, this will consume real memory, so you'd
	 * better enable it. The long-term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning.
	 */
	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
				       vif->mmap_pages,
				       false);
	if (err) {
		netdev_err(dev, "Could not reserve mmap_pages\n");
		vfree(vif->grant_copy_op);
		free_netdev(dev);
		return ERR_PTR(-ENOMEM);
	}
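	/* Pre-initialise one zerocopy ubuf_info per pending slot. Its
	 * callback fires when the stack is finished with a mapped guest
	 * page, letting the dealloc thread return the grant.
	 */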
	for (i = 0; i < MAX_PENDING_REQS; i++) {
		vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

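/* Map the frontend's shared rings, bind its event channel(s), start the
 * guest-RX and dealloc kthreads and finally raise the carrier, undoing
 * everything in reverse order on failure.
 */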
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	BUG_ON(vif->tx_irq);
	BUG_ON(vif->task);
	BUG_ON(vif->dealloc_task);

	err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&vif->wq);
	init_waitqueue_head(&vif->dealloc_wq);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			vif->domid, tx_evtchn, xenvif_interrupt, 0,
			vif->dev->name, vif);
		if (err < 0)
			goto err_unmap;
		vif->tx_irq = vif->rx_irq = err;
		disable_irq(vif->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
			 "%s-tx", vif->dev->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			vif->tx_irq_name, vif);
		if (err < 0)
			goto err_unmap;
		vif->tx_irq = err;
		disable_irq(vif->tx_irq);

		snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
			 "%s-rx", vif->dev->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			vif->rx_irq_name, vif);
		if (err < 0)
			goto err_tx_unbind;
		vif->rx_irq = err;
		disable_irq(vif->rx_irq);
	}

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)vif, "%s-guest-rx", vif->dev->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}

	vif->task = task;

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)vif, "%s-dealloc", vif->dev->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
		err = PTR_ERR(task);
		/* Don't leak the guest-RX kthread created above. */
		kthread_stop(vif->task);
		vif->task = NULL;
		goto err_rx_unbind;
	}

	vif->dealloc_task = task;

	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	netif_carrier_on(vif->dev);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();

	wake_up_process(vif->task);
	wake_up_process(vif->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(vif->rx_irq, vif);
	vif->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(vif->tx_irq, vif);
	vif->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_rings(vif);
err:
	module_put(THIS_MODULE);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	netif_carrier_off(dev); /* discard queued packets */
	if (netif_running(dev))
		xenvif_down(vif);
	rtnl_unlock();
}

void xenvif_disconnect(struct xenvif *vif)
{
	if (netif_carrier_ok(vif->dev))
		xenvif_carrier_off(vif);

	if (vif->task) {
		kthread_stop(vif->task);
		vif->task = NULL;
	}

	if (vif->dealloc_task) {
		kthread_stop(vif->dealloc_task);
		vif->dealloc_task = NULL;
	}

	if (vif->tx_irq) {
		if (vif->tx_irq == vif->rx_irq)
			unbind_from_irqhandler(vif->tx_irq, vif);
		else {
			unbind_from_irqhandler(vif->tx_irq, vif);
			unbind_from_irqhandler(vif->rx_irq, vif);
		}
		vif->tx_irq = 0;
	}

	xenvif_unmap_frontend_rings(vif);
}

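/* Final teardown. Wait until every granted page has been handed back
 * (every grant_tx_handle entry is NETBACK_INVALID_HANDLE again), since
 * the ballooned pages cannot safely be freed while a grant mapping is
 * still live; the scan restarts from index 0 after each wait.
 */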
void xenvif_free(struct xenvif *vif)
{
	int i, unmap_timeout = 0;

	for (i = 0; i < MAX_PENDING_REQS; ++i) {
		if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
			unmap_timeout++;
			schedule_timeout(msecs_to_jiffies(1000));
			if (unmap_timeout > 9 &&
			    net_ratelimit())
				netdev_err(vif->dev,
					   "Page still granted! Index: %x\n",
					   i);
			i = -1;
		}
	}

	free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);

	netif_napi_del(&vif->napi);

	unregister_netdev(vif->dev);

	vfree(vif->grant_copy_op);
	free_netdev(vif->dev);

	module_put(THIS_MODULE);
}