/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

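/* TX event from the frontend: defer the actual work to NAPI, but only
 * if there are unconsumed requests on the ring.
 */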
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
                napi_schedule(&vif->napi);

        return IRQ_HANDLED;
}

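/* NAPI poll handler: process up to @budget TX requests from the
 * frontend, then decide whether the vif can leave the poll list.
 */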
static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif *vif = container_of(napi, struct xenvif, napi);
        int work_done;

        work_done = xenvif_tx_action(vif, budget);

        if (work_done < budget) {
                int more_to_do = 0;
                unsigned long flags;

                /* It is necessary to disable IRQs before calling
                 * RING_HAS_UNCONSUMED_REQUESTS; otherwise we might
                 * lose an event from the frontend.
                 *
                 * Consider:
                 *   RING_HAS_UNCONSUMED_REQUESTS
                 *   <frontend generates event to trigger napi_schedule>
                 *   __napi_complete
                 *
                 * This handler is still in the scheduled state, so the
                 * event has no effect at all. After __napi_complete
                 * the handler is descheduled and cannot be scheduled
                 * again. The event is lost and the ring stalls
                 * completely.
                 */

                local_irq_save(flags);

                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
                if (!(more_to_do &&
                      xenvif_tx_pending_slots_available(vif)))
                        __napi_complete(napi);

                local_irq_restore(flags);
        }

        return work_done;
}

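/* RX event from the frontend: receive-side work is done by a dedicated
 * kernel thread rather than in NAPI context, so just wake it.
 */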
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        xenvif_kick_thread(vif);

        return IRQ_HANDLED;
}

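/* Combined handler, used when the frontend has not negotiated
 * feature-split-event-channels and TX and RX share one event channel.
 */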
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

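/* Queue a packet from the local network stack for transmission to the
 * frontend, i.e. onto the guest RX path.
 */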
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        int min_slots_needed;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if the vif is not ready */
        if (vif->task == NULL ||
            vif->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        /* At minimum we need one slot for the header and one for each
         * frag.
         */
        min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

        /* If the skb is GSO then we also need an extra slot for the
         * metadata.
         */
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
            skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                min_slots_needed++;

        /* If the skb can't possibly fit in the remaining slots
         * then turn off the queue to give the ring a chance to
         * drain.
         */
        if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
                xenvif_stop_queue(vif);

        skb_queue_tail(&vif->rx_queue, skb);
        xenvif_kick_thread(vif);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);

        return &vif->dev->stats;
}

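/* Bring the datapath up or down: toggle NAPI and the (possibly split)
 * event-channel IRQs and, on the way down, stop the credit timer.
 */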
static void xenvif_up(struct xenvif *vif)
{
        napi_enable(&vif->napi);
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
        xenvif_check_rx_xenvif(vif);
}

static void xenvif_down(struct xenvif *vif)
{
        napi_disable(&vif->napi);
        disable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                disable_irq(vif->rx_irq);
        del_timer_sync(&vif->credit_timeout);
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);

        if (netif_carrier_ok(dev))
                xenvif_up(vif);
        netif_start_queue(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);

        if (netif_carrier_ok(dev))
                xenvif_down(vif);
        netif_stop_queue(dev);
        return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

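/* Mask out features the frontend has not negotiated, so the stack
 * never hands us an skb we cannot forward to this guest.
 */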
static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

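/* Extra per-vif counters exposed through ethtool -S, each resolved via
 * its byte offset within struct xenvif.
 */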
static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets that
         * were never freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        void *vif = netdev_priv(dev);
        int i;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

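/* Allocate and register the netdevice for a new vif, together with the
 * state needed by the zerocopy TX path: the pending ring, the ballooned
 * pages that grant mappings are inserted into, and the grant-copy
 * scratch space.
 */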
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};
        int i;

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
                                     MAX_GRANT_COPY_OPS);
        if (vif->grant_copy_op == NULL) {
                pr_warn("Could not allocate grant copy space for %s\n", name);
                free_netdev(dev);
                return ERR_PTR(-ENOMEM);
        }

        vif->domid  = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;

        vif->credit_bytes = vif->remaining_credit = ~0UL;
        vif->credit_usec  = 0UL;
        init_timer(&vif->credit_timeout);
        vif->credit_window_start = get_jiffies_64();

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        skb_queue_head_init(&vif->rx_queue);
        skb_queue_head_init(&vif->tx_queue);

        vif->pending_cons = 0;
        vif->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; i++)
                vif->pending_ring[i] = i;
        spin_lock_init(&vif->callback_lock);
        spin_lock_init(&vif->response_lock);
        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long-term solution would be to use just
         * a bunch of valid page descriptors, without dependency on
         * ballooning.
         */
        err = alloc_xenballooned_pages(MAX_PENDING_REQS,
                                       vif->mmap_pages,
                                       false);
        if (err) {
                netdev_err(dev, "Could not reserve mmap_pages\n");
                /* Don't leak the netdev and grant-copy space on failure */
                vfree(vif->grant_copy_op);
                free_netdev(dev);
                return ERR_PTR(-ENOMEM);
        }
        for (i = 0; i < MAX_PENDING_REQS; i++) {
                vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        memset(dev->dev_addr, 0xFF, ETH_ALEN);
        dev->dev_addr[0] &= ~0x01;

        netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
                vfree(vif->grant_copy_op);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}

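/* Connect a vif to its frontend: map the shared rings, bind the event
 * channel(s) and start the guest-rx and dealloc kernel threads.
 */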
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(vif->tx_irq);
        BUG_ON(vif->task);
        BUG_ON(vif->dealloc_task);

        err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&vif->wq);
        init_waitqueue_head(&vif->dealloc_wq);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        vif->dev->name, vif);
                if (err < 0)
                        goto err_unmap;
                vif->tx_irq = vif->rx_irq = err;
                disable_irq(vif->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
                         "%s-tx", vif->dev->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        vif->tx_irq_name, vif);
                if (err < 0)
                        goto err_unmap;
                vif->tx_irq = err;
                disable_irq(vif->tx_irq);

                snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
                         "%s-rx", vif->dev->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        vif->rx_irq_name, vif);
                if (err < 0)
                        goto err_tx_unbind;
                vif->rx_irq = err;
                disable_irq(vif->rx_irq);
        }

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)vif, "%s-guest-rx", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }

        vif->task = task;

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)vif, "%s-dealloc", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
                /* Don't leak the guest-rx kthread created above */
                kthread_stop(vif->task);
                vif->task = NULL;
                goto err_rx_unbind;
        }

        vif->dealloc_task = task;

        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        netif_carrier_on(vif->dev);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();

        wake_up_process(vif->task);
        wake_up_process(vif->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(vif->rx_irq, vif);
        vif->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(vif->tx_irq, vif);
        vif->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_rings(vif);
err:
        module_put(THIS_MODULE);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        netif_carrier_off(dev); /* discard queued packets */
        if (netif_running(dev))
                xenvif_down(vif);
        rtnl_unlock();
}

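/* Tear down the connection to the frontend: stop the kthreads, unbind
 * the IRQ(s) and unmap the rings. The reverse of xenvif_connect().
 */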
void xenvif_disconnect(struct xenvif *vif)
{
        if (netif_carrier_ok(vif->dev))
                xenvif_carrier_off(vif);

        if (vif->task) {
                kthread_stop(vif->task);
                vif->task = NULL;
        }

        if (vif->dealloc_task) {
                kthread_stop(vif->dealloc_task);
                vif->dealloc_task = NULL;
        }

        if (vif->tx_irq) {
                if (vif->tx_irq == vif->rx_irq)
                        unbind_from_irqhandler(vif->tx_irq, vif);
                else {
                        unbind_from_irqhandler(vif->tx_irq, vif);
                        unbind_from_irqhandler(vif->rx_irq, vif);
                }
                vif->tx_irq = 0;
        }

        xenvif_unmap_frontend_rings(vif);
}

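/* Final teardown. Grant entries may still be mapped for skbs whose
 * zerocopy callbacks have not fired yet, so keep rescanning (the
 * "i = -1" restart below) until every handle is invalid before
 * releasing the ballooned pages.
 */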
void xenvif_free(struct xenvif *vif)
{
        int i, unmap_timeout = 0;

        for (i = 0; i < MAX_PENDING_REQS; ++i) {
                if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
                        unmap_timeout++;
                        schedule_timeout(msecs_to_jiffies(1000));
                        if (unmap_timeout > 9 &&
                            net_ratelimit())
                                netdev_err(vif->dev,
                                           "Page still granted! Index: %x\n",
                                           i);
                        i = -1;
                }
        }

        free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);

        netif_napi_del(&vif->napi);

        unregister_netdev(vif->dev);

        vfree(vif->grant_copy_op);
        free_netdev(vif->dev);

        module_put(THIS_MODULE);
}