#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
};

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)

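/*
 * Look up the macvlan_dev that registered us as the rx_handler on
 * this device; callers must hold rcu_read_lock().
 */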
static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

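/*
 * Re-attach a previously disabled queue to the device's array of
 * enabled taps so it receives packets again. Caller must hold RTNL.
 */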
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

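/*
 * Detach a queue from the array of enabled taps without dropping its
 * references: the last enabled queue is moved into the vacated slot so
 * the array stays dense. Caller must hold RTNL.
 */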
static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all else fails, use the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
	int i, j = 0;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		qlist[j++] = q;
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
	}
	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
}

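/*
 * rx_handler on the underlying macvlan device: hand the frame to one
 * of our character device queues, segmenting or checksumming it first
 * if the tap side cannot accept it as-is.
 */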
static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			skb_queue_tail(&q->sk.sk_receive_queue, skb);
			goto wake_up;
		}

		kfree_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			skb_queue_tail(&q->sk.sk_receive_queue, segs);
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_ALL_CSUM) &&
		    skb_checksum_help(skb))
			goto drop;
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

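/*
 * Reserve a character device minor for this macvlan device from the
 * shared IDR; called from the netdevice notifier on NETDEV_REGISTER.
 */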
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind		= "macvtap",
	.setup		= macvtap_setup,
	.newlink	= macvtap_newlink,
	.dellink	= macvtap_dellink,
};

static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

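/*
 * open() on /dev/tapNN: find the macvlan device that owns this minor,
 * allocate a queue backed by a struct sock and attach it to the device.
 */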
static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);
	if (!q)
		goto out;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM's virtio_net uses macvtap; enable zero copy
	 * between guest kernel and host kernel when the lower device
	 * supports zerocopy.
	 *
	 * The macvlan supports zerocopy iff the lower device supports
	 * zero copy, so we don't have to look at the lower device
	 * directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

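/*
 * Allocate an skb with @linear bytes of linear data; whatever remains
 * of @len is placed in paged fragments.
 */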
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;
	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
				     current->comm);
			gso_type = SKB_GSO_UDP;
			if (skb->protocol == htons(ETH_P_IPV6))
				ipv6_proxy_select_ident(skb);
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
					  vnet_hdr->csum_offset))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}

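/*
 * Fill in a virtio_net header describing this skb for the reader;
 * the inverse of macvtap_skb_from_vnet_hdr() above.
 */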
static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
				    struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = skb_headlen(skb);
		vnet_hdr->gso_size = sinfo->gso_size;
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
		if (vlan_tx_tag_present(skb))
			vnet_hdr->csum_start += VLAN_HLEN;
		vnet_hdr->csum_offset = skb->csum_offset;
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */
}

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	bool zerocopy = false;
	size_t linear;
	ssize_t n;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
		if (n != sizeof(vnet_hdr))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		     vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
							vnet_hdr.hdr_len)
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						vnet_hdr.csum_offset + 2;
		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		if (vnet_hdr.hdr_len > good_linear)
			linear = good_linear;
		else
			linear = vnet_hdr.hdr_len;
	}

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else {
		err = skb_copy_datagram_from_iter(skb, 0, from, len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

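/* write() on the tap fd: inject a single packet into the macvlan device */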
static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;

	return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

775/* Put packet to the user space buffer */
776static ssize_t macvtap_put_user(struct macvtap_queue *q,
777 const struct sk_buff *skb,
Herbert Xu6c36d2e2014-11-07 21:22:25 +0800778 struct iov_iter *iter)
Arnd Bergmann20d29d72010-01-30 12:24:26 +0000779{
Arnd Bergmann20d29d72010-01-30 12:24:26 +0000780 int ret;
Arnd Bergmannb9fb9ee2010-02-18 05:48:17 +0000781 int vnet_hdr_len = 0;
Basil Gorf09e2242012-05-03 22:55:24 +0000782 int vlan_offset = 0;
Herbert Xu6c36d2e2014-11-07 21:22:25 +0800783 int total;
Arnd Bergmannb9fb9ee2010-02-18 05:48:17 +0000784
785 if (q->flags & IFF_VNET_HDR) {
786 struct virtio_net_hdr vnet_hdr;
Michael S. Tsirkin55afbd02010-04-29 13:50:48 +0300787 vnet_hdr_len = q->vnet_hdr_sz;
Herbert Xu6c36d2e2014-11-07 21:22:25 +0800788 if (iov_iter_count(iter) < vnet_hdr_len)
Arnd Bergmannb9fb9ee2010-02-18 05:48:17 +0000789 return -EINVAL;
790
Zhi Yong Wu359d44d2013-12-07 04:13:04 +0800791 macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
Arnd Bergmannb9fb9ee2010-02-18 05:48:17 +0000792
Herbert Xu6c36d2e2014-11-07 21:22:25 +0800793 if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
794 sizeof(vnet_hdr))
Arnd Bergmannb9fb9ee2010-02-18 05:48:17 +0000795 return -EFAULT;
Jason Wang7cc76f52014-11-20 16:31:05 +0800796
797 iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
Arnd Bergmannb9fb9ee2010-02-18 05:48:17 +0000798 }
Herbert Xu6c36d2e2014-11-07 21:22:25 +0800799 total = vnet_hdr_len;
Jason Wangce232ce2013-12-11 13:08:34 +0800800 total += skb->len;
Arnd Bergmann20d29d72010-01-30 12:24:26 +0000801
Herbert Xu6c36d2e2014-11-07 21:22:25 +0800802 if (vlan_tx_tag_present(skb)) {
Basil Gorf09e2242012-05-03 22:55:24 +0000803 struct {
804 __be16 h_vlan_proto;
805 __be16 h_vlan_TCI;
806 } veth;
Jason Wang0fbe0d42013-07-16 13:36:34 +0800807 veth.h_vlan_proto = skb->vlan_proto;
Basil Gorf09e2242012-05-03 22:55:24 +0000808 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
Arnd Bergmann20d29d72010-01-30 12:24:26 +0000809
Basil Gorf09e2242012-05-03 22:55:24 +0000810 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
Jason Wangce232ce2013-12-11 13:08:34 +0800811 total += VLAN_HLEN;
Arnd Bergmann20d29d72010-01-30 12:24:26 +0000812
Herbert Xu6c36d2e2014-11-07 21:22:25 +0800813 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
814 if (ret || !iov_iter_count(iter))
Basil Gorf09e2242012-05-03 22:55:24 +0000815 goto done;
816
Herbert Xu6c36d2e2014-11-07 21:22:25 +0800817 ret = copy_to_iter(&veth, sizeof(veth), iter);
818 if (ret != sizeof(veth) || !iov_iter_count(iter))
Basil Gorf09e2242012-05-03 22:55:24 +0000819 goto done;
820 }
821
Herbert Xu6c36d2e2014-11-07 21:22:25 +0800822 ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
823 skb->len - vlan_offset);
Basil Gorf09e2242012-05-03 22:55:24 +0000824
825done:
Jason Wangce232ce2013-12-11 13:08:34 +0800826 return ret ? ret : total;
Arnd Bergmann20d29d72010-01-30 12:24:26 +0000827}
828
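/*
 * Blocking read helper shared by read_iter and recvmsg: wait for a
 * packet on the receive queue and copy it into the iov_iter.
 */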
static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       struct iov_iter *to,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	if (!iov_iter_count(to))
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (skb) {
		ret = macvtap_put_user(q, skb, to);
		kfree_skb(skb);
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);
	return ret;
}

static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

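/*
 * Pin the macvlan device behind this queue for use outside RCU;
 * caller must hold RTNL and balance with macvtap_put_vlan().
 */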
static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}

static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}

/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
		    (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
			     IFF_MULTI_QUEUE, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations macvtap_fops = {
	.owner		= THIS_MODULE,
	.open		= macvtap_open,
	.release	= macvtap_release,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= macvtap_read_iter,
	.write_iter	= macvtap_write_iter,
	.poll		= macvtap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= macvtap_compat_ioctl,
#endif
};

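/*
 * Socket-level send/receive entry points, reached through
 * macvtap_get_socket() below by in-kernel users such as vhost-net
 * rather than through the file operations.
 */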
static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	struct iov_iter from;
	iov_iter_init(&from, WRITE, m->msg_iov, m->msg_iovlen, total_len);
	return macvtap_get_user(q, m, &from, m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len,
			   int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	struct iov_iter to;
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	iov_iter_init(&to, READ, m->msg_iov, m->msg_iovlen, total_len);
	ret = macvtap_do_read(q, &to, flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;
	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);

static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(macvtap_class, &dev->dev, devt,
					 dev, "tap%d", dev->ifindex);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		break;
	case NETDEV_UNREGISTER:
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call	= macvtap_device_event,
};

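/*
 * Module init: reserve the chardev region, register the cdev, class
 * and netdevice notifier, and finally the rtnl link ops.
 */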
static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);
		goto out3;
	}

	err = register_netdevice_notifier(&macvtap_notifier_block);
	if (err)
		goto out4;

	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

	return 0;

out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
	class_unregister(macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}
module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");