/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);
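/* Note: with 0644 permissions this parameter should also show up at
 * /sys/module/xen_netback/parameters/separate_tx_rx_irq, and it can be
 * set at load time, e.g. "modprobe xen-netback separate_tx_rx_irq=0"
 * (illustrative invocation).
 */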

/*
 * This is the maximum number of slots a skb can have. If a guest sends
 * a skb which exceeds this limit, it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/*
 * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum number of slots a valid packet can use. This value is
 * currently defined as XEN_NETIF_NR_SLOTS_MIN, which is supposed to be
 * supported by all backends.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/*
 * If head != INVALID_PENDING_RING_IDX, it means this tx request is the
 * head of one or more merged tx requests; otherwise it is the
 * continuation of the previous tx request.
 */
static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
{
	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}
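/* Illustrative example: if a packet was coalesced from three tx requests
 * occupying pending ring slots 5..7, then pending_tx_info[ring[5]].head
 * is 5 (a valid ring index, so it is a head) while the entries for slots
 * 6 and 7 hold INVALID_PENDING_RING_IDX, marking them as continuations.
 */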

static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx,
				  u8 status);
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st);

static inline int tx_work_todo(struct xenvif *vif);
static inline int rx_work_todo(struct xenvif *vif);

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags);

static inline unsigned long idx_to_pfn(struct xenvif *vif,
				       u16 idx)
{
	return page_to_pfn(vif->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif *vif,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}

/*
 * This is the amount of packet we copy rather than map, so that the
 * guest can't fiddle with the contents of the headers while we do
 * packet processing on them (netfilter, routing, etc).
 */
#define PKT_PROT_LEN    (ETH_HLEN + \
			 VLAN_HLEN + \
			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
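/* For reference, assuming the usual header sizes (ETH_HLEN 14, VLAN_HLEN 4,
 * a 20-byte IPv4 header plus MAX_IPOPTLEN 40, a 20-byte TCP header plus
 * MAX_TCP_OPTION_SPACE 40), PKT_PROT_LEN works out to 138 bytes of copied
 * header.
 */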

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}
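/* The mask above is equivalent to i % MAX_PENDING_REQS only because
 * MAX_PENDING_REQS is a power of two; the producer/consumer indices are
 * free running and wrap via this mask.
 */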

static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
	return MAX_PENDING_REQS -
		vif->pending_prod + vif->pending_cons;
}

static int max_required_rx_slots(struct xenvif *vif)
{
	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);

	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
	if (vif->can_sg || vif->gso || vif->gso_prefix)
		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */

	return max;
}
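/* Illustrative worst case, assuming 4 KiB pages and MAX_SKB_FRAGS == 18:
 * an MTU of 1500 needs one slot, so an SG/GSO-capable vif must reserve
 * 1 + 18 + 1 = 20 ring slots per packet.
 */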

int xen_netbk_rx_ring_full(struct xenvif *vif)
{
	RING_IDX peek   = vif->rx_req_cons_peek;
	RING_IDX needed = max_required_rx_slots(vif);

	return ((vif->rx.sring->req_prod - peek) < needed) ||
	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}

int xen_netbk_must_stop_queue(struct xenvif *vif)
{
	if (!xen_netbk_rx_ring_full(vif))
		return 0;

	vif->rx.sring->req_event = vif->rx_req_cons_peek +
		max_required_rx_slots(vif);
	mb(); /* request notification /then/ check the queue */

	return xen_netbk_rx_ring_full(vif);
}
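/* The set-req_event/barrier/re-check sequence above is the usual
 * lost-wakeup guard for shared rings: requests posted between the first
 * check and the req_event store are caught by the second check; anything
 * posted later triggers a frontend notification.
 */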

/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i)   this frag would fit completely in the next buffer
	 * and (ii)  there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer
	 *   empty. Strictly speaking this is already covered
	 *   by (ii) but is explicitly checked because
	 *   netfront relies on the first buffer being
	 *   non-empty and can crash otherwise.
	 *
	 * This means we will effectively linearise small
	 * frags, but we do not needlessly split large buffers
	 * into multiple copies; we tend to give large frags
	 * their own buffers as before.
	 */
	if ((offset + size > MAX_BUFFER_OFFSET) &&
	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}

/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int count;
	int i, copy_off;

	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);

	copy_off = skb_headlen(skb) % PAGE_SIZE;

	if (skb_shinfo(skb)->gso_size)
		count++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
		unsigned long bytes;

		offset &= ~PAGE_MASK;

		while (size > 0) {
			BUG_ON(offset >= PAGE_SIZE);
			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

			bytes = PAGE_SIZE - offset;

			if (bytes > size)
				bytes = size;

			if (start_new_rx_buffer(copy_off, bytes, 0)) {
				count++;
				copy_off = 0;
			}

			if (copy_off + bytes > MAX_BUFFER_OFFSET)
				bytes = MAX_BUFFER_OFFSET - copy_off;

			copy_off += bytes;

			offset += bytes;
			size -= bytes;

			if (offset == PAGE_SIZE)
				offset = 0;
		}
	}
	return count;
}
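/* This counting loop must mirror the buffer-advance logic in
 * netbk_gop_frag_copy below; any divergence between the two would under-
 * or over-estimate the number of ring slots the packet will consume.
 */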

struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}

/*
 * Set up the grant copy operations for this fragment.
 */
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				struct netrx_pending_operations *npo,
				struct page *page, unsigned long size,
				unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct xenvif_rx_meta *meta;
	unsigned long bytes;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		copy_gop->len = bytes;

		copy_gop->source.domid = DOMID_SELF;
		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
		copy_gop->source.offset = offset;

		copy_gop->dest.domid = vif->domid;
		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}

/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
			 struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		netbk_gop_frag_copy(vif, skb, npo,
				    virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		netbk_gop_frag_copy(vif, skb, npo,
				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    skb_shinfo(skb)->frags[i].page_offset,
				    &head);
	}

	return npo->meta_prod - old_meta_prod;
}

/*
 * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
 * used to set up the operations on top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
			   struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}

static void netbk_add_frag_responses(struct xenvif *vif, int status,
				     struct xenvif_rx_meta *meta,
				     int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

struct skb_cb_overlay {
	int meta_slots_used;
};
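/* The overlay lives in skb->cb, the private scratch area each sk_buff
 * carries, so no extra allocation is needed: it is written in
 * xen_netbk_rx_action before the skb is queued on rxq and read back once
 * the grant copies have completed.
 */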

static void xen_netbk_kick_thread(struct xenvif *vif)
{
	wake_up(&vif->wq);
}

void xen_netbk_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	int nr_frags;
	int count;
	unsigned long offset;
	struct skb_cb_overlay *sco;
	int need_to_notify = 0;

	struct netrx_pending_operations npo = {
		.copy = vif->grant_copy_op,
		.meta = vif->meta,
	};

	skb_queue_head_init(&rxq);

	count = 0;

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		vif = netdev_priv(skb->dev);
		nr_frags = skb_shinfo(skb)->nr_frags;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = netbk_gop_skb(skb, &npo);

		count += nr_frags + 1;

		__skb_queue_tail(&rxq, skb);

		/* Filled the batch queue? */
		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
			break;
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		return;

	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		vif = netdev_priv(skb->dev);

		if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		netbk_add_frag_responses(vif, status,
					 vif->meta + npo.meta_cons + 1,
					 sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		if (ret)
			need_to_notify = 1;

		xenvif_notify_tx_completion(vif);

		npo.meta_cons += sco->meta_slots_used;
		dev_kfree_skb(skb);
	}

	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);

	/* More work to do? */
	if (!skb_queue_empty(&vif->rx_queue))
		xen_netbk_kick_thread(vif);
}

void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
	skb_queue_tail(&vif->rx_queue, skb);

	xen_netbk_kick_thread(vif);
}

void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}

static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}
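/* Illustrative example: with credit_bytes = 1000000 and credit_usec =
 * 100000, the vif may send roughly 1 MB per 100 ms (~80 Mbit/s). Each
 * replenish adds credit_bytes, clamped by max_burst, so an idle guest
 * cannot bank unbounded credit.
 */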
673
674static void tx_credit_callback(unsigned long data)
675{
676 struct xenvif *vif = (struct xenvif *)data;
677 tx_add_credit(vif);
678 xen_netbk_check_rx_xenvif(vif);
679}

static void netbk_tx_err(struct xenvif *vif,
			 struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
}

static void netbk_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	xenvif_carrier_off(vif);
}

static int netbk_count_requests(struct xenvif *vif,
				struct xen_netif_tx_request *first,
				struct xen_netif_tx_request *txp,
				int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			netbk_fatal_tx_err(vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and is
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			netbk_fatal_tx_err(vif);
			return -E2BIG;
		}

		/* The Xen network protocol had an implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			netbk_fatal_tx_err(vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		netbk_tx_err(vif, first, cons + slots);
		return drop_err;
	}

	return slots;
}
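/* The return convention above is three-way: a negative errno when the
 * frontend violated the protocol (the vif has been disabled) or the
 * packet was dropped, zero when the first request carries no
 * XEN_NETTXF_more_data, and otherwise the number of extra slots copied
 * into txp[].
 */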

static struct page *xen_netbk_alloc_page(struct xenvif *vif,
					 u16 pending_idx)
{
	struct page *page;

	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
	if (!page)
		return NULL;
	vif->mmap_pages[pending_idx] = page;

	return page;
}

static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
						  struct sk_buff *skb,
						  struct xen_netif_tx_request *txp,
						  struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	u16 head_idx = 0;
	int slot, start;
	struct page *page;
	pending_ring_idx_t index, start_idx = 0;
	uint16_t dst_offset;
	unsigned int nr_slots;
	struct pending_tx_info *first = NULL;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on the same page as the header
	 * fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	/* Coalesce tx requests; at this point the packet passed in
	 * should be <= 64K. Any packets larger than 64K have been
	 * handled in netbk_count_requests().
	 */
	for (shinfo->nr_frags = slot = start; slot < nr_slots;
	     shinfo->nr_frags++) {
		struct pending_tx_info *pending_tx_info =
			vif->pending_tx_info;

		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
		if (!page)
			goto err;

		dst_offset = 0;
		first = NULL;
		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
			gop->flags = GNTCOPY_source_gref;

			gop->source.u.ref = txp->gref;
			gop->source.domid = vif->domid;
			gop->source.offset = txp->offset;

			gop->dest.domid = DOMID_SELF;

			gop->dest.offset = dst_offset;
			gop->dest.u.gmfn = virt_to_mfn(page_address(page));

			if (dst_offset + txp->size > PAGE_SIZE) {
				/* This page can only merge a portion
				 * of the tx request. Do not increment any
				 * pointer / counter here. The txp
				 * will be dealt with in future
				 * rounds, eventually hitting the
				 * `else` branch.
				 */
				gop->len = PAGE_SIZE - dst_offset;
				txp->offset += gop->len;
				txp->size -= gop->len;
				dst_offset += gop->len; /* quit loop */
			} else {
				/* This tx request can be merged in the page */
				gop->len = txp->size;
				dst_offset += gop->len;

				index = pending_index(vif->pending_cons++);

				pending_idx = vif->pending_ring[index];

				memcpy(&pending_tx_info[pending_idx].req, txp,
				       sizeof(*txp));

				/* Poison these fields, corresponding
				 * fields for head tx req will be set
				 * to correct values after the loop.
				 */
				vif->mmap_pages[pending_idx] = (void *)(~0UL);
				pending_tx_info[pending_idx].head =
					INVALID_PENDING_RING_IDX;

				if (!first) {
					first = &pending_tx_info[pending_idx];
					start_idx = index;
					head_idx = pending_idx;
				}

				txp++;
				slot++;
			}

			gop++;
		}

		first->req.offset = 0;
		first->req.size = dst_offset;
		first->head = start_idx;
		vif->mmap_pages[head_idx] = page;
		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
	}

	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);

	return gop;
err:
	/* Unwind, freeing all pages and sending error responses. */
	while (shinfo->nr_frags-- > start) {
		xen_netbk_idx_release(vif,
				frag_get_pending_idx(&frags[shinfo->nr_frags]),
				XEN_NETIF_RSP_ERROR);
	}
	/* The head too, if necessary. */
	if (start)
		xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	return NULL;
}

static int xen_netbk_tx_check_gop(struct xenvif *vif,
				  struct sk_buff *skb,
				  struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct pending_tx_info *tx_info;
	int nr_frags = shinfo->nr_frags;
	int i, err, start;
	u16 peek; /* peek into next tx request */

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err))
		xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	/* Skip first skb fragment if it is on the same page as the header
	 * fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t head;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
		tx_info = &vif->pending_tx_info[pending_idx];
		head = tx_info->head;

		/* Check error status: if okay then remember grant handle. */
		do {
			newerr = (++gop)->status;
			if (newerr)
				break;
			peek = vif->pending_ring[pending_index(++head)];
		} while (!pending_tx_is_head(vif, peek));

		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xen_netbk_idx_release(vif, pending_idx,
						      XEN_NETIF_RSP_OKAY);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xen_netbk_idx_release(vif, pending_idx,
					      XEN_NETIF_RSP_OKAY);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}

static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &vif->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(vif, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xen_netbk_idx_release */
		get_page(vif->mmap_pages[pending_idx]);
		xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
	}
}

static int xen_netbk_get_extras(struct xenvif *vif,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			netbk_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			netbk_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int netbk_set_skb_gso(struct xenvif *vif,
			     struct sk_buff *skb,
			     struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		netbk_fatal_tx_err(vif);
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		netbk_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	struct iphdr *iph;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
					  offsetof(struct tcphdr, check)))
			goto out;

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = tcp_hdr(skb);
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
					  offsetof(struct udphdr, check)))
			goto out;

		if (recalculate_partial_csum) {
			struct udphdr *udph = udp_hdr(skb);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
				   iph->protocol);
		goto out;
	}

	err = 0;

out:
	return err;
}
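/* csum_tcpudp_magic() above yields the IPv4 pseudo-header sum (addresses,
 * protocol, L4 length); seeding the checksum field with its complement is
 * what CHECKSUM_PARTIAL expects, leaving the remainder of the checksum to
 * be folded in later by hardware or skb_checksum_help().
 */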

static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	unsigned long now = jiffies;
	unsigned long next_credit =
		vif->credit_timeout.expires +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq(now, next_credit)) {
		vif->credit_timeout.expires = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data =
			(unsigned long)vif;
		vif->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&vif->credit_timeout,
			  next_credit);

		return true;
	}

	return false;
}

static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
{
	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
		< MAX_PENDING_REQS)) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (vif->tx.sring->req_prod - vif->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   vif->tx.sring->req_prod, vif->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			netbk_fatal_tx_err(vif);
			continue;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
		if (!work_to_do)
			break;

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size))
			break;

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xen_netbk_get_extras(vif, extras,
							  work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			netbk_tx_err(vif, &txreq, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			netbk_fatal_tx_err(vif);
			break;
		}

		index = pending_index(vif->pending_cons);
		pending_idx = vif->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			netbk_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (netbk_set_skb_gso(vif, skb, gso)) {
				/* Failure in netbk_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		/* XXX could copy straight to head */
		page = xen_netbk_alloc_page(vif, pending_idx);
		if (!page) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			break;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&vif->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		vif->pending_tx_info[pending_idx].head = index;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		vif->pending_cons++;

		request_gop = xen_netbk_get_requests(vif, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&vif->tx_queue, skb);

		vif->tx.req_cons = idx;

		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
			break;
	}

	return gop - vif->tx_copy_ops;
}

static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
{
	struct gnttab_copy *gop = vif->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget &&
	       (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		txp = &vif->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xen_netbk_tx_check_gop(vif, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xen_netbk_idx_release(vif, pending_idx,
					      XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xen_netbk_fill_frags(vif, skb);

		/*
		 * If the initial fragment was < PKT_PROT_LEN then
		 * pull through some bytes from the other fragments to
		 * increase the linear region to PKT_PROT_LEN bytes.
		 */
		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		work_done++;

		netif_receive_skb(skb);
	}

	return work_done;
}
1429
1430/* Called after netfront has transmitted */
Wei Liub3f980b2013-08-26 12:59:38 +01001431int xen_netbk_tx_action(struct xenvif *vif, int budget)
Ian Campbellf942dc22011-03-15 00:06:18 +00001432{
1433 unsigned nr_gops;
Wei Liub3f980b2013-08-26 12:59:38 +01001434 int work_done;
Ian Campbellf942dc22011-03-15 00:06:18 +00001435
Wei Liub3f980b2013-08-26 12:59:38 +01001436 if (unlikely(!tx_work_todo(vif)))
1437 return 0;
1438
1439 nr_gops = xen_netbk_tx_build_gops(vif);
Ian Campbellf942dc22011-03-15 00:06:18 +00001440
1441 if (nr_gops == 0)
Wei Liub3f980b2013-08-26 12:59:38 +01001442 return 0;
Andres Lagar-Cavillac5718982012-09-14 14:26:59 +00001443
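	/* Issue all queued grant copy operations in one batched
	 * hypercall; gnttab_batch_copy() also retries any entries
	 * that fail with GNTST_eagain.
	 */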
	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);

	work_done = xen_netbk_tx_submit(vif, nr_gops);

	return work_done;
}

static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx,
				  u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t head;
	u16 peek; /* peek into next tx request */

	BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));

	/* Already complete? */
	if (vif->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &vif->pending_tx_info[pending_idx];

	head = pending_tx_info->head;

	BUG_ON(!pending_tx_is_head(vif, head));
	BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);

	do {
		pending_ring_idx_t index;
		pending_ring_idx_t idx = pending_index(head);
		u16 info_idx = vif->pending_ring[idx];

		pending_tx_info = &vif->pending_tx_info[info_idx];
		make_tx_response(vif, &pending_tx_info->req, status);

		/* Setting any number other than
		 * INVALID_PENDING_RING_IDX indicates this slot is
		 * starting a new packet / ending a previous packet.
		 */
		pending_tx_info->head = 0;

		index = pending_index(vif->pending_prod++);
		vif->pending_ring[index] = vif->pending_ring[info_idx];

		peek = vif->pending_ring[pending_index(++head)];

	} while (!pending_tx_is_head(vif, peek));

	put_page(vif->mmap_pages[pending_idx]);
	vif->mmap_pages[pending_idx] = NULL;
}

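/* Queue a tx completion on the shared ring and kick the frontend's
 * event channel if it asked to be notified.
 */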
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id     = txp->id;
	resp->status = st;

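	/* A request flagged XEN_NETTXF_extra_info consumed one extra
	 * ring slot; complete that slot with a null response.
	 */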
	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->tx_irq);
}

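/* Fill in the next rx response on the shared ring.  A negative status
 * overrides the size as the error code reported to the frontend.
 */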
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset = offset;
	resp->flags  = flags;
	resp->id     = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}

static inline int rx_work_todo(struct xenvif *vif)
{
	return !skb_queue_empty(&vif->rx_queue);
}

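/* Transmit work can proceed only while the ring has unconsumed
 * requests and enough pending slots remain for a maximally
 * fragmented packet (XEN_NETBK_LEGACY_SLOTS_MAX slots).
 */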
static inline int tx_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
	     < MAX_PENDING_REQS))
		return 1;

	return 0;
}

void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}

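/* Map the shared tx and rx rings granted by the frontend into the
 * backend's address space and initialise the back half of each ring.
 */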
int xen_netbk_map_frontend_rings(struct xenvif *vif,
				 grant_ref_t tx_ring_ref,
				 grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	vif->rx_req_cons_peek = 0;

	return 0;

err:
	xen_netbk_unmap_frontend_rings(vif);
	return err;
}

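/* Per-vif kernel thread for the receive path: sleep until there is
 * work (or the thread is asked to stop), then push queued skbs to
 * the frontend.  The transmit path is driven from NAPI instead.
 */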
int xen_netbk_kthread(void *data)
{
	struct xenvif *vif = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->wq,
					 rx_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		if (rx_work_todo(vif))
			xen_netbk_rx_action(vif);

		cond_resched();
	}

	return 0;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

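	/* A well-behaved frontend may legitimately use up to
	 * XEN_NETBK_LEGACY_SLOTS_MAX slots per packet, so never treat
	 * fewer slots than that as fatal.
	 */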
	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");