/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
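
/*
 * Illustrative sketch (added commentary, not code from this file): the NOTE
 * above means the lockless __skb_ queue helpers depend entirely on
 * caller-provided synchronization.  A hypothetical caller could pair them
 * with the queue's own lock while interrupts are disabled:
 *
 *	static void example_enqueue(struct sk_buff_head *list,
 *				    struct sk_buff *skb)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&list->lock, flags);
 *		__skb_queue_tail(list, skb);
 *		spin_unlock_irqrestore(&list->lock, flags);
 *	}
 *
 * The locked wrappers such as skb_queue_tail() implement this very pattern.
 */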

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
		fclones->skb2.pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
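
/*
 * Usage sketch (added commentary): most callers reach this through the
 * alloc_skb() wrapper in <linux/skbuff.h>, then carve the buffer up with
 * skb_reserve()/skb_put().  "hlen", "dlen" and "data" are hypothetical:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			reserve headroom
 *	memcpy(skb_put(skb, dlen), data, dlen);	append payload at the tail
 */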

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator.
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes:
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate an sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	skb->head_frag = frag_size != 0;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);
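
/*
 * Driver-side sketch of the notes above (added commentary, hypothetical
 * names): the RX ring holds raw buffers; build_skb() wraps one after DMA.
 *
 *	fragsz = SKB_DATA_ALIGN(NET_SKB_PAD + buflen) +
 *		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	buf = netdev_alloc_frag(fragsz);
 *	... NIC DMAs the frame to buf + NET_SKB_PAD ...
 *	skb = build_skb(buf, fragsz);	pass 0 only for kmalloc'ed heads
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}
 *
 * This mirrors what __alloc_rx_skb() below does for small allocations.
 */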

struct netdev_alloc_cache {
	struct page_frag	frag;
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_count every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);

static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
				       gfp_t gfp_mask)
{
	const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;

	if (order) {
		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
		nc->frag.size = PAGE_SIZE << (page ? order : 0);
	}

	if (unlikely(!page))
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

	nc->frag.page = page;

	return page;
}

static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
			       unsigned int fragsz, gfp_t gfp_mask)
{
	struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
	struct page *page = nc->frag.page;
	unsigned int size;
	int offset;

	if (unlikely(!page)) {
refill:
		page = __page_frag_refill(nc, gfp_mask);
		if (!page)
			return NULL;

		/* if size can vary use frag.size else just use PAGE_SIZE */
		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;

		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		atomic_add(size - 1, &page->_count);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = size;
		nc->frag.offset = size;
	}

	offset = nc->frag.offset - fragsz;
	if (unlikely(offset < 0)) {
		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
			goto refill;

		/* if size can vary use frag.size else just use PAGE_SIZE */
		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;

		/* OK, page count is 0, we can safely set it */
		atomic_set(&page->_count, size);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = size;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	nc->frag.offset = offset;

	return page_address(page) + offset;
}

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	unsigned long flags;
	void *data;

	local_irq_save(flags);
	data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);
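
/*
 * Design note (added commentary): __alloc_page_frag() front-loads the page
 * reference count.  On refill, one atomic_add() raises page->_count to
 * "size" and pagecnt_bias remembers how many of those references the cache
 * still owns; handing out a fragment then only decrements the local bias.
 * Only when the page is exhausted does atomic_sub_and_test() reconcile the
 * bias against _count, so the hot path touches no shared cache line.
 */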

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(napi_alloc_frag);
447
Eric Dumazet6f532612012-05-18 05:12:12 +0000448/**
Alexander Duyckfd11a832014-12-09 19:40:49 -0800449 * __alloc_rx_skb - allocate an skbuff for rx
Christoph Hellwig8af27452006-07-31 22:35:23 -0700450 * @length: length to allocate
451 * @gfp_mask: get_free_pages mask, passed to alloc_skb
Alexander Duyckfd11a832014-12-09 19:40:49 -0800452 * @flags: If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
453 * allocations in case we have to fallback to __alloc_skb()
454 * If SKB_ALLOC_NAPI is set, page fragment will be allocated
455 * from napi_cache instead of netdev_cache.
Christoph Hellwig8af27452006-07-31 22:35:23 -0700456 *
457 * Allocate a new &sk_buff and assign it a usage count of one. The
458 * buffer has unspecified headroom built in. Users should allocate
459 * the headroom they think they need without accounting for the
460 * built in space. The built in space is used for optimisations.
461 *
462 * %NULL is returned if there is no free memory.
463 */
Alexander Duyckfd11a832014-12-09 19:40:49 -0800464static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
465 int flags)
Christoph Hellwig8af27452006-07-31 22:35:23 -0700466{
Eric Dumazet6f532612012-05-18 05:12:12 +0000467 struct sk_buff *skb = NULL;
Alexander Duyckfd11a832014-12-09 19:40:49 -0800468 unsigned int fragsz = SKB_DATA_ALIGN(length) +
Eric Dumazeta1c7fff2012-05-17 07:34:16 +0000469 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Christoph Hellwig8af27452006-07-31 22:35:23 -0700470
Eric Dumazet310e1582012-07-16 13:15:52 +0200471 if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
Mel Gormanc93bdd02012-07-31 16:44:19 -0700472 void *data;
473
474 if (sk_memalloc_socks())
475 gfp_mask |= __GFP_MEMALLOC;
476
Alexander Duyckfd11a832014-12-09 19:40:49 -0800477 data = (flags & SKB_ALLOC_NAPI) ?
478 __napi_alloc_frag(fragsz, gfp_mask) :
479 __netdev_alloc_frag(fragsz, gfp_mask);
Eric Dumazeta1c7fff2012-05-17 07:34:16 +0000480
Eric Dumazet6f532612012-05-18 05:12:12 +0000481 if (likely(data)) {
482 skb = build_skb(data, fragsz);
483 if (unlikely(!skb))
484 put_page(virt_to_head_page(data));
Eric Dumazeta1c7fff2012-05-17 07:34:16 +0000485 }
Eric Dumazeta1c7fff2012-05-17 07:34:16 +0000486 } else {
Alexander Duyckfd11a832014-12-09 19:40:49 -0800487 skb = __alloc_skb(length, gfp_mask,
Mel Gormanc93bdd02012-07-31 16:44:19 -0700488 SKB_ALLOC_RX, NUMA_NO_NODE);
Eric Dumazeta1c7fff2012-05-17 07:34:16 +0000489 }
Alexander Duyckfd11a832014-12-09 19:40:49 -0800490 return skb;
491}
492
493/**
494 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
495 * @dev: network device to receive on
496 * @length: length to allocate
497 * @gfp_mask: get_free_pages mask, passed to alloc_skb
498 *
499 * Allocate a new &sk_buff and assign it a usage count of one. The
500 * buffer has NET_SKB_PAD headroom built in. Users should allocate
501 * the headroom they think they need without accounting for the
502 * built in space. The built in space is used for optimisations.
503 *
504 * %NULL is returned if there is no free memory.
505 */
506struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
507 unsigned int length, gfp_t gfp_mask)
508{
509 struct sk_buff *skb;
510
511 length += NET_SKB_PAD;
512 skb = __alloc_rx_skb(length, gfp_mask, 0);
513
Christoph Hellwig7b2e4972006-08-07 16:09:04 -0700514 if (likely(skb)) {
Christoph Hellwig8af27452006-07-31 22:35:23 -0700515 skb_reserve(skb, NET_SKB_PAD);
Christoph Hellwig7b2e4972006-08-07 16:09:04 -0700516 skb->dev = dev;
517 }
Alexander Duyckfd11a832014-12-09 19:40:49 -0800518
Christoph Hellwig8af27452006-07-31 22:35:23 -0700519 return skb;
520}
David S. Millerb4ac530fc2009-02-10 02:09:24 -0800521EXPORT_SYMBOL(__netdev_alloc_skb);
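
/*
 * Usage sketch (added commentary): a typical driver RX slow path, using
 * the GFP_ATOMIC netdev_alloc_skb() wrapper from <linux/skbuff.h>
 * ("pkt_len" and "rx_buf" are hypothetical driver state):
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return;				drop, out of memory
 *	skb_reserve(skb, NET_IP_ALIGN);		align the IP header
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */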

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	length += NET_SKB_PAD + NET_IP_ALIGN;
	skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);

	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
		skb->dev = napi->dev;
	}

	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
552
Peter Zijlstra654bed12008-10-07 14:22:33 -0700553void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
Eric Dumazet50269e12012-03-23 23:59:33 +0000554 int size, unsigned int truesize)
Peter Zijlstra654bed12008-10-07 14:22:33 -0700555{
556 skb_fill_page_desc(skb, i, page, off, size);
557 skb->len += size;
558 skb->data_len += size;
Eric Dumazet50269e12012-03-23 23:59:33 +0000559 skb->truesize += truesize;
Peter Zijlstra654bed12008-10-07 14:22:33 -0700560}
561EXPORT_SYMBOL(skb_add_rx_frag);
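
/*
 * Usage sketch (added commentary): a driver appending a received page to
 * an skb accounts len, data_len and truesize in one call:
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			offset, frag_len, PAGE_SIZE);
 *
 * The truesize argument (PAGE_SIZE here) should reflect the memory actually
 * consumed, not just frag_len, so socket memory accounting stays honest.
 */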

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	/*
	 * If the skb buf is from userspace, we need to notify the caller
	 * that the lower device DMA has finished;
	 */
	if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = shinfo->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, true);
	}

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the
		 * original skb. This test would have no chance to be true
		 * for the clone, while here, branch prediction will be good.
		 */
		if (atomic_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!atomic_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that
 *	the frame is being dropped after a failure and notes that in its
 *	tracing.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
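
/*
 * Added note: kfree_skb() and consume_skb() differ only in which tracepoint
 * fires, which matters to drop-monitoring tools hooked on trace_kfree_skb().
 * A TX completion handler would therefore typically do:
 *
 *	if (sent_ok)
 *		consume_skb(skb);	normal end of life, not a drop
 *	else
 *		kfree_skb(skb);		counted as a packet drop
 */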

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	/* We do not copy old->sk */
	new->dev		= old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
	new->sp			= secpath_get(old->sp);
#endif
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#ifdef CONFIG_NET_CLS_ACT
	CHECK_SKB_FIELD(tc_verd);
#endif
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
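
/*
 * Usage sketch (added commentary): skb_morph() suits callers that must keep
 * a particular &sk_buff address alive while adopting another skb's data, as
 * IP fragment reassembly does when a later fragment should become the head:
 *
 *	skb_morph(head, frag);	head now references frag's data
 *	consume_skb(frag);	drop the leftover shell; the data survives
 *				because __skb_clone() took a dataref
 */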

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg, false);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)page_private(head);
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    atomic_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		atomic_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
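
/*
 * Usage sketch (added commentary): a clone shares packet data, so a writer
 * must unshare before modifying, e.g. with the copy-on-write helpers from
 * <linux/skbuff.h>:
 *
 *	clone = skb_clone(skb, GFP_ATOMIC);
 *	if (clone && !skb_cow(clone, 0)) {
 *		... clone's header is now private and writable ...
 *	}
 */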

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
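
/*
 * Added note: skb_copy() linearizes and copies every byte, so it suits
 * paths that must rewrite payload:
 *
 *	nskb = skb_copy(skb, GFP_ATOMIC);
 *	if (nskb) {
 *		... any byte of nskb->data may be modified ...
 *	}
 *
 * When only headers will change, pskb_copy() below shares the paged
 * fragments and is considerably cheaper.
 */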

/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);
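
/*
 * Usage sketch (added commentary): the pskb_copy() wrapper in
 * <linux/skbuff.h> calls this with the skb's current headroom and
 * @fclone == false:
 *
 *	nskb = pskb_copy(skb, GFP_ATOMIC);	private linear header,
 *						shared paged fragments
 *
 * Pass @fclone == true (via __pskb_copy_fclone) when the copy itself is
 * likely to be cloned soon, e.g. by a retransmitting TX path.
 */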

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success or a negative error
 *	code if expansion failed. In the latter case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	skb->tail    += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
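
/*
 * Usage sketch (added commentary): growing headroom on a private skb, e.g.
 * before pushing an encapsulation header ("needed" is hypothetical):
 *
 *	if (skb_headroom(skb) < needed &&
 *	    pskb_expand_head(skb, SKB_DATA_ALIGN(needed - skb_headroom(skb)),
 *			     0, GFP_ATOMIC))
 *		goto drop;		expansion failed, skb is unchanged
 *	hdr = skb_push(skb, needed);	re-read cached header pointers after
 *					a successful expansion
 *
 * skb_cow_head() in <linux/skbuff.h> packages roughly this pattern.
 */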

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241
1242/**
1243 * skb_copy_expand - copy and expand sk_buff
1244 * @skb: buffer to copy
1245 * @newheadroom: new free bytes at head
1246 * @newtailroom: new free bytes at tail
1247 * @gfp_mask: allocation priority
1248 *
1249 *	Make a copy of both an &sk_buff and its data, and while doing so
1250 * allocate additional space.
1251 *
1252 * This is used when the caller wishes to modify the data and needs a
1253 * private copy of the data to alter as well as more space for new fields.
1254 * Returns %NULL on failure or the pointer to the buffer
1255 * on success. The returned buffer has a reference count of 1.
1256 *
1257 * You must pass %GFP_ATOMIC as the allocation priority if this function
1258 * is called from an interrupt.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 */
1260struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
Victor Fusco86a76ca2005-07-08 14:57:47 -07001261 int newheadroom, int newtailroom,
Al Virodd0fc662005-10-07 07:46:04 +01001262 gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263{
1264 /*
1265 * Allocate the copy buffer
1266 */
Mel Gormanc93bdd02012-07-31 16:44:19 -07001267 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1268 gfp_mask, skb_alloc_rx_flag(skb),
1269 NUMA_NO_NODE);
Patrick McHardyefd1e8d2007-04-10 18:30:09 -07001270 int oldheadroom = skb_headroom(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 int head_copy_len, head_copy_off;
1272
1273 if (!n)
1274 return NULL;
1275
1276 skb_reserve(n, newheadroom);
1277
1278 /* Set the tail pointer and length */
1279 skb_put(n, skb->len);
1280
Patrick McHardyefd1e8d2007-04-10 18:30:09 -07001281 head_copy_len = oldheadroom;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 head_copy_off = 0;
1283 if (newheadroom <= head_copy_len)
1284 head_copy_len = newheadroom;
1285 else
1286 head_copy_off = newheadroom - head_copy_len;
1287
1288 /* Copy the linear header and data. */
1289 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1290 skb->len + head_copy_len))
1291 BUG();
1292
1293 copy_skb_header(n, skb);
1294
Eric Dumazet030737b2013-10-19 11:42:54 -07001295 skb_headers_offset_update(n, newheadroom - oldheadroom);
Patrick McHardyefd1e8d2007-04-10 18:30:09 -07001296
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297 return n;
1298}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08001299EXPORT_SYMBOL(skb_copy_expand);
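
/* Illustrative sketch: take a private copy with room for a hypothetical
 * 8-byte trailer, as a tunnel transmit path might. GFP_ATOMIC is assumed
 * because such paths often run in softirq context.
 */
static struct sk_buff * __maybe_unused example_copy_with_trailer(const struct sk_buff *skb)
{
	struct sk_buff *n = skb_copy_expand(skb, skb_headroom(skb), 8,
					    GFP_ATOMIC);

	if (n)
		memset(skb_put(n, 8), 0, 8);	/* claim and zero the trailer */
	return n;
}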
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300
1301/**
1302 * skb_pad - zero pad the tail of an skb
1303 * @skb: buffer to pad
1304 * @pad: space to pad
1305 *
1306 * Ensure that a buffer is followed by a padding area that is zero
1307 * filled. Used by network drivers which may DMA or transfer data
1308 * beyond the buffer end onto the wire.
1309 *
Herbert Xu5b057c62006-06-23 02:06:41 -07001310 * May return error in out of memory cases. The skb is freed on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001312
Herbert Xu5b057c62006-06-23 02:06:41 -07001313int skb_pad(struct sk_buff *skb, int pad)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314{
Herbert Xu5b057c62006-06-23 02:06:41 -07001315 int err;
1316 int ntail;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001317
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318	/* If the skbuff is non-linear, tailroom is always zero. */
Herbert Xu5b057c62006-06-23 02:06:41 -07001319 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 memset(skb->data+skb->len, 0, pad);
Herbert Xu5b057c62006-06-23 02:06:41 -07001321 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 }
Herbert Xu5b057c62006-06-23 02:06:41 -07001323
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001324 ntail = skb->data_len + pad - (skb->end - skb->tail);
Herbert Xu5b057c62006-06-23 02:06:41 -07001325 if (likely(skb_cloned(skb) || ntail > 0)) {
1326 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1327 if (unlikely(err))
1328 goto free_skb;
1329 }
1330
1331	/* FIXME: The use of this function with non-linear skbs really needs
1332 * to be audited.
1333 */
1334 err = skb_linearize(skb);
1335 if (unlikely(err))
1336 goto free_skb;
1337
1338 memset(skb->data + skb->len, 0, pad);
1339 return 0;
1340
1341free_skb:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 kfree_skb(skb);
Herbert Xu5b057c62006-06-23 02:06:41 -07001343 return err;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001344}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08001345EXPORT_SYMBOL(skb_pad);
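
/* Illustrative sketch: padding a runt Ethernet frame to the 60-byte
 * minimum (ETH_ZLEN) before handing it to hardware. skb_pad() zeroes the
 * tailroom but does not touch skb->len, so the caller accounts for the
 * padding itself; on failure the skb has already been freed.
 */
static int __maybe_unused example_pad_runt(struct sk_buff *skb)
{
	unsigned int pad;

	if (skb->len >= ETH_ZLEN)
		return 0;
	pad = ETH_ZLEN - skb->len;
	if (skb_pad(skb, pad))
		return -ENOMEM;		/* skb is gone, do not touch it */
	__skb_put(skb, pad);		/* account the zeroed padding */
	return 0;
}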
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001346
Ilpo Järvinen0dde3e12008-03-27 17:43:41 -07001347/**
Mathias Krause0c7ddf32013-11-07 14:18:24 +01001348 * pskb_put - add data to the tail of a potentially fragmented buffer
1349 * @skb: start of the buffer to use
1350 * @tail: tail fragment of the buffer to use
1351 * @len: amount of data to add
1352 *
1353 * This function extends the used data area of the potentially
1354 * fragmented buffer. @tail must be the last fragment of @skb -- or
1355 * @skb itself. If this would exceed the total buffer size the kernel
1356 * will panic. A pointer to the first byte of the extra data is
1357 * returned.
1358 */
1359
1360unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1361{
1362 if (tail != skb) {
1363 skb->data_len += len;
1364 skb->len += len;
1365 }
1366 return skb_put(tail, len);
1367}
1368EXPORT_SYMBOL_GPL(pskb_put);
1369
1370/**
Ilpo Järvinen0dde3e12008-03-27 17:43:41 -07001371 * skb_put - add data to a buffer
1372 * @skb: buffer to use
1373 * @len: amount of data to add
1374 *
1375 * This function extends the used data area of the buffer. If this would
1376 * exceed the total buffer size the kernel will panic. A pointer to the
1377 * first byte of the extra data is returned.
1378 */
1379unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1380{
1381 unsigned char *tmp = skb_tail_pointer(skb);
1382 SKB_LINEAR_ASSERT(skb);
1383 skb->tail += len;
1384 skb->len += len;
1385 if (unlikely(skb->tail > skb->end))
1386 skb_over_panic(skb, len, __builtin_return_address(0));
1387 return tmp;
1388}
1389EXPORT_SYMBOL(skb_put);
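
/* Illustrative sketch: building a packet in the linear area. The headroom
 * reservation is an assumption made for a lower layer that will later
 * skb_push() its own headers in front of the payload.
 */
static struct sk_buff * __maybe_unused example_build_packet(const void *payload,
							    unsigned int len)
{
	struct sk_buff *skb = alloc_skb(LL_MAX_HEADER + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, LL_MAX_HEADER);	/* leave room for headers */
	memcpy(skb_put(skb, len), payload, len);	/* tail grows by len */
	return skb;
}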
1390
Ilpo Järvinen6be8ac22008-03-27 17:47:24 -07001391/**
Ilpo Järvinenc2aa2702008-03-27 17:52:40 -07001392 * skb_push - add data to the start of a buffer
1393 * @skb: buffer to use
1394 * @len: amount of data to add
1395 *
1396 * This function extends the used data area of the buffer at the buffer
1397 * start. If this would exceed the total buffer headroom the kernel will
1398 * panic. A pointer to the first byte of the extra data is returned.
1399 */
1400unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1401{
1402 skb->data -= len;
1403 skb->len += len;
1404 if (unlikely(skb->data<skb->head))
1405 skb_under_panic(skb, len, __builtin_return_address(0));
1406 return skb->data;
1407}
1408EXPORT_SYMBOL(skb_push);
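
/* Illustrative sketch: prepending a hypothetical 4-byte tag in front of
 * the current data. This only works because the builder above reserved
 * headroom; without it, skb_push() would hit skb_under_panic().
 */
static void __maybe_unused example_push_tag(struct sk_buff *skb, __be32 tag)
{
	__be32 *p = (__be32 *)skb_push(skb, sizeof(tag));

	*p = tag;	/* skb->data now points at the tag */
}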
1409
1410/**
Ilpo Järvinen6be8ac22008-03-27 17:47:24 -07001411 * skb_pull - remove data from the start of a buffer
1412 * @skb: buffer to use
1413 * @len: amount of data to remove
1414 *
1415 * This function removes data from the start of a buffer, returning
1416 * the memory to the headroom. A pointer to the next data in the buffer
1417 *	is returned. Once the data has been pulled, future pushes will overwrite
1418 * the old data.
1419 */
1420unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1421{
David S. Miller47d29642010-05-02 02:21:44 -07001422 return skb_pull_inline(skb, len);
Ilpo Järvinen6be8ac22008-03-27 17:47:24 -07001423}
1424EXPORT_SYMBOL(skb_pull);
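
/* Illustrative sketch: stepping over a fixed-size header. skb_pull()
 * itself only walks the linear area, so a careful caller checks with
 * pskb_may_pull() first, which may linearize the needed bytes via
 * __pskb_pull_tail() (see below).
 */
static void * __maybe_unused example_skip_header(struct sk_buff *skb,
						 unsigned int hdrlen)
{
	if (!pskb_may_pull(skb, hdrlen))
		return NULL;		/* packet shorter than the header */
	return skb_pull(skb, hdrlen);	/* points just past the header */
}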
1425
Ilpo Järvinen419ae742008-03-27 17:54:01 -07001426/**
1427 * skb_trim - remove end from a buffer
1428 * @skb: buffer to alter
1429 * @len: new length
1430 *
1431 * Cut the length of a buffer down by removing data from the tail. If
1432 * the buffer is already under the length specified it is not modified.
1433 * The skb must be linear.
1434 */
1435void skb_trim(struct sk_buff *skb, unsigned int len)
1436{
1437 if (skb->len > len)
1438 __skb_trim(skb, len);
1439}
1440EXPORT_SYMBOL(skb_trim);
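
/* Illustrative sketch: dropping trailing padding once the real payload
 * length is known from a hypothetical length field. skb_trim() is for
 * linear buffers only; pskb_trim(), a wrapper around ___pskb_trim()
 * below, is the general variant.
 */
static int __maybe_unused example_drop_padding(struct sk_buff *skb,
					       unsigned int real_len)
{
	if (skb->len <= real_len)
		return 0;
	if (!skb_is_nonlinear(skb)) {
		skb_trim(skb, real_len);
		return 0;
	}
	return pskb_trim(skb, real_len);
}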
1441
Herbert Xu3cc0e872006-06-09 16:13:38 -07001442/* Trims skb to length len. It can change skb pointers.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 */
1444
Herbert Xu3cc0e872006-06-09 16:13:38 -07001445int ___pskb_trim(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446{
Herbert Xu27b437c2006-07-13 19:26:39 -07001447 struct sk_buff **fragp;
1448 struct sk_buff *frag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 int offset = skb_headlen(skb);
1450 int nfrags = skb_shinfo(skb)->nr_frags;
1451 int i;
Herbert Xu27b437c2006-07-13 19:26:39 -07001452 int err;
1453
1454 if (skb_cloned(skb) &&
1455 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1456 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457
Herbert Xuf4d26fb2006-07-30 20:20:28 -07001458 i = 0;
1459 if (offset >= len)
1460 goto drop_pages;
1461
1462 for (; i < nfrags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00001463 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
Herbert Xu27b437c2006-07-13 19:26:39 -07001464
1465 if (end < len) {
1466 offset = end;
1467 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 }
Herbert Xu27b437c2006-07-13 19:26:39 -07001469
Eric Dumazet9e903e02011-10-18 21:00:24 +00001470 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
Herbert Xu27b437c2006-07-13 19:26:39 -07001471
Herbert Xuf4d26fb2006-07-30 20:20:28 -07001472drop_pages:
Herbert Xu27b437c2006-07-13 19:26:39 -07001473 skb_shinfo(skb)->nr_frags = i;
1474
1475 for (; i < nfrags; i++)
Ian Campbellea2ab692011-08-22 23:44:58 +00001476 skb_frag_unref(skb, i);
Herbert Xu27b437c2006-07-13 19:26:39 -07001477
David S. Miller21dc3302010-08-23 00:13:46 -07001478 if (skb_has_frag_list(skb))
Herbert Xu27b437c2006-07-13 19:26:39 -07001479 skb_drop_fraglist(skb);
Herbert Xuf4d26fb2006-07-30 20:20:28 -07001480 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 }
1482
Herbert Xu27b437c2006-07-13 19:26:39 -07001483 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1484 fragp = &frag->next) {
1485 int end = offset + frag->len;
1486
1487 if (skb_shared(frag)) {
1488 struct sk_buff *nfrag;
1489
1490 nfrag = skb_clone(frag, GFP_ATOMIC);
1491 if (unlikely(!nfrag))
1492 return -ENOMEM;
1493
1494 nfrag->next = frag->next;
Eric Dumazet85bb2a62012-04-19 02:24:53 +00001495 consume_skb(frag);
Herbert Xu27b437c2006-07-13 19:26:39 -07001496 frag = nfrag;
1497 *fragp = frag;
1498 }
1499
1500 if (end < len) {
1501 offset = end;
1502 continue;
1503 }
1504
1505 if (end > len &&
1506 unlikely((err = pskb_trim(frag, len - offset))))
1507 return err;
1508
1509 if (frag->next)
1510 skb_drop_list(&frag->next);
1511 break;
1512 }
1513
Herbert Xuf4d26fb2006-07-30 20:20:28 -07001514done:
Herbert Xu27b437c2006-07-13 19:26:39 -07001515 if (len > skb_headlen(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 skb->data_len -= skb->len - len;
1517 skb->len = len;
1518 } else {
Herbert Xu27b437c2006-07-13 19:26:39 -07001519 skb->len = len;
1520 skb->data_len = 0;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001521 skb_set_tail_pointer(skb, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 }
1523
1524 return 0;
1525}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08001526EXPORT_SYMBOL(___pskb_trim);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527
1528/**
1529 * __pskb_pull_tail - advance tail of skb header
1530 * @skb: buffer to reallocate
1531 * @delta: number of bytes to advance tail
1532 *
1533 *	The function makes sense only on a fragmented &sk_buff:
1534 *	it expands the header, moving its tail forward and copying the
1535 *	necessary data from the fragmented part.
1536 *
1537 * &sk_buff MUST have reference count of 1.
1538 *
1539 * Returns %NULL (and &sk_buff does not change) if pull failed
1540 *	or the value of the new tail of the skb in the case of success.
1541 *
1542 * All the pointers pointing into skb header may change and must be
1543 *	reloaded after a call to this function.
1544 */
1545
1546/* Moves the tail of the skb head forward, copying data from the fragmented
1547 * part when necessary.
1548 * 1. It may fail due to malloc failure.
1549 * 2. It may change skb pointers.
1550 *
1551 * It is pretty complicated. Luckily, it is called only in exceptional cases.
1552 */
1553unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1554{
1555	/* If the skb does not have enough free space at the tail, get a new one
1556 * plus 128 bytes for future expansions. If we have enough
1557 * room at tail, reallocate without expansion only if skb is cloned.
1558 */
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001559 int i, k, eat = (skb->tail + delta) - skb->end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
1561 if (eat > 0 || skb_cloned(skb)) {
1562 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1563 GFP_ATOMIC))
1564 return NULL;
1565 }
1566
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001567 if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 BUG();
1569
1570	/* Optimization: no fragments, no reason to preestimate
1571 * size of pulled pages. Superb.
1572 */
David S. Miller21dc3302010-08-23 00:13:46 -07001573 if (!skb_has_frag_list(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 goto pull_pages;
1575
1576 /* Estimate size of pulled pages. */
1577 eat = delta;
1578 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00001579 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1580
1581 if (size >= eat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 goto pull_pages;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001583 eat -= size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 }
1585
1586	/* If we need to update the frag list, we are in trouble.
1587	 * Certainly, it is possible to add an offset to the skb data,
1588	 * but taking into account that pulling is expected to
1589	 * be a very rare operation, it is worth fighting against
1590	 * further bloating of the skb head and crucifying ourselves here instead.
1591	 * Pure masochism, indeed. 8)8)
1592 */
1593 if (eat) {
1594 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1595 struct sk_buff *clone = NULL;
1596 struct sk_buff *insp = NULL;
1597
1598 do {
Kris Katterjohn09a62662006-01-08 22:24:28 -08001599 BUG_ON(!list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600
1601 if (list->len <= eat) {
1602 /* Eaten as whole. */
1603 eat -= list->len;
1604 list = list->next;
1605 insp = list;
1606 } else {
1607 /* Eaten partially. */
1608
1609 if (skb_shared(list)) {
1610 /* Sucks! We need to fork list. :-( */
1611 clone = skb_clone(list, GFP_ATOMIC);
1612 if (!clone)
1613 return NULL;
1614 insp = list->next;
1615 list = clone;
1616 } else {
1617 /* This may be pulled without
1618 * problems. */
1619 insp = list;
1620 }
1621 if (!pskb_pull(list, eat)) {
Wei Yongjunf3fbbe02009-02-25 00:37:32 +00001622 kfree_skb(clone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 return NULL;
1624 }
1625 break;
1626 }
1627 } while (eat);
1628
1629 /* Free pulled out fragments. */
1630 while ((list = skb_shinfo(skb)->frag_list) != insp) {
1631 skb_shinfo(skb)->frag_list = list->next;
1632 kfree_skb(list);
1633 }
1634 /* And insert new clone at head. */
1635 if (clone) {
1636 clone->next = list;
1637 skb_shinfo(skb)->frag_list = clone;
1638 }
1639 }
1640 /* Success! Now we may commit changes to skb data. */
1641
1642pull_pages:
1643 eat = delta;
1644 k = 0;
1645 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00001646 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1647
1648 if (size <= eat) {
Ian Campbellea2ab692011-08-22 23:44:58 +00001649 skb_frag_unref(skb, i);
Eric Dumazet9e903e02011-10-18 21:00:24 +00001650 eat -= size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 } else {
1652 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1653 if (eat) {
1654 skb_shinfo(skb)->frags[k].page_offset += eat;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001655 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 eat = 0;
1657 }
1658 k++;
1659 }
1660 }
1661 skb_shinfo(skb)->nr_frags = k;
1662
1663 skb->tail += delta;
1664 skb->data_len -= delta;
1665
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001666 return skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08001668EXPORT_SYMBOL(__pskb_pull_tail);
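
/* Illustrative sketch: most callers reach this function indirectly via
 * pskb_may_pull(). After a successful pull, previously cached pointers
 * into the header are stale and must be re-derived from skb->data.
 */
static int __maybe_unused example_peek_byte(struct sk_buff *skb,
					    unsigned int hlen)
{
	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;	/* shorter than hlen, or reallocation failed */
	return skb->data[0];	/* e.g. a version field, read after the pull */
}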
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
Eric Dumazet22019b12011-07-29 18:37:31 +00001670/**
1671 * skb_copy_bits - copy bits from skb to kernel buffer
1672 * @skb: source skb
1673 * @offset: offset in source
1674 * @to: destination buffer
1675 * @len: number of bytes to copy
1676 *
1677 * Copy the specified number of bytes from the source skb to the
1678 * destination buffer.
1679 *
1680 * CAUTION ! :
1681 * If its prototype is ever changed,
1682 * check arch/{*}/net/{*}.S files,
1683 * since it is called from BPF assembly code.
1684 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1686{
David S. Miller1a028e52007-04-27 15:21:23 -07001687 int start = skb_headlen(skb);
David S. Millerfbb398a2009-06-09 00:18:59 -07001688 struct sk_buff *frag_iter;
1689 int i, copy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
1691 if (offset > (int)skb->len - len)
1692 goto fault;
1693
1694 /* Copy header. */
David S. Miller1a028e52007-04-27 15:21:23 -07001695 if ((copy = start - offset) > 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 if (copy > len)
1697 copy = len;
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03001698 skb_copy_from_linear_data_offset(skb, offset, to, copy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 if ((len -= copy) == 0)
1700 return 0;
1701 offset += copy;
1702 to += copy;
1703 }
1704
1705 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
David S. Miller1a028e52007-04-27 15:21:23 -07001706 int end;
Eric Dumazet51c56b02012-04-05 11:35:15 +02001707 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
Ilpo Järvinen547b7922008-07-25 21:43:18 -07001709 WARN_ON(start > offset + len);
David S. Miller1a028e52007-04-27 15:21:23 -07001710
Eric Dumazet51c56b02012-04-05 11:35:15 +02001711 end = start + skb_frag_size(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 if ((copy = end - offset) > 0) {
1713 u8 *vaddr;
1714
1715 if (copy > len)
1716 copy = len;
1717
Eric Dumazet51c56b02012-04-05 11:35:15 +02001718 vaddr = kmap_atomic(skb_frag_page(f));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 memcpy(to,
Eric Dumazet51c56b02012-04-05 11:35:15 +02001720 vaddr + f->page_offset + offset - start,
1721 copy);
1722 kunmap_atomic(vaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
1724 if ((len -= copy) == 0)
1725 return 0;
1726 offset += copy;
1727 to += copy;
1728 }
David S. Miller1a028e52007-04-27 15:21:23 -07001729 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 }
1731
David S. Millerfbb398a2009-06-09 00:18:59 -07001732 skb_walk_frags(skb, frag_iter) {
1733 int end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734
David S. Millerfbb398a2009-06-09 00:18:59 -07001735 WARN_ON(start > offset + len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736
David S. Millerfbb398a2009-06-09 00:18:59 -07001737 end = start + frag_iter->len;
1738 if ((copy = end - offset) > 0) {
1739 if (copy > len)
1740 copy = len;
1741 if (skb_copy_bits(frag_iter, offset - start, to, copy))
1742 goto fault;
1743 if ((len -= copy) == 0)
1744 return 0;
1745 offset += copy;
1746 to += copy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 }
David S. Millerfbb398a2009-06-09 00:18:59 -07001748 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 }
Shirley Maa6686f22011-07-06 12:22:12 +00001750
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 if (!len)
1752 return 0;
1753
1754fault:
1755 return -EFAULT;
1756}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08001757EXPORT_SYMBOL(skb_copy_bits);
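
/* Illustrative sketch of the classic consumer pattern: fetch a header
 * that may straddle frags into a caller-supplied buffer, without touching
 * the skb. This mirrors what the real skb_header_pointer() helper does.
 */
static const void * __maybe_unused example_header_pointer(const struct sk_buff *skb,
							  int offset, int len,
							  void *buffer)
{
	int hlen = skb_headlen(skb);

	if (offset >= 0 && hlen - offset >= len)
		return skb->data + offset;	/* header already linear */
	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;
	return buffer;
}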
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758
Jens Axboe9c55e012007-11-06 23:30:13 -08001759/*
1760 * Callback from splice_to_pipe(), if we need to release some pages
1761 * at the end of the spd in case we errored out while filling the pipe.
1762 */
1763static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1764{
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08001765 put_page(spd->pages[i]);
1766}
Jens Axboe9c55e012007-11-06 23:30:13 -08001767
David S. Millera108d5f2012-04-23 23:06:11 -04001768static struct page *linear_to_page(struct page *page, unsigned int *len,
1769 unsigned int *offset,
Eric Dumazet18aafc62013-01-11 14:46:37 +00001770 struct sock *sk)
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08001771{
Eric Dumazet5640f762012-09-23 23:04:42 +00001772 struct page_frag *pfrag = sk_page_frag(sk);
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08001773
Eric Dumazet5640f762012-09-23 23:04:42 +00001774 if (!sk_page_frag_refill(sk, pfrag))
1775 return NULL;
Jarek Poplawski4fb66992009-02-01 00:41:42 -08001776
Eric Dumazet5640f762012-09-23 23:04:42 +00001777 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
Jarek Poplawski4fb66992009-02-01 00:41:42 -08001778
Eric Dumazet5640f762012-09-23 23:04:42 +00001779 memcpy(page_address(pfrag->page) + pfrag->offset,
1780 page_address(page) + *offset, *len);
1781 *offset = pfrag->offset;
1782 pfrag->offset += *len;
Jarek Poplawski4fb66992009-02-01 00:41:42 -08001783
Eric Dumazet5640f762012-09-23 23:04:42 +00001784 return pfrag->page;
Jens Axboe9c55e012007-11-06 23:30:13 -08001785}
1786
Eric Dumazet41c73a02012-04-22 12:26:16 +00001787static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1788 struct page *page,
1789 unsigned int offset)
1790{
1791 return spd->nr_pages &&
1792 spd->pages[spd->nr_pages - 1] == page &&
1793 (spd->partial[spd->nr_pages - 1].offset +
1794 spd->partial[spd->nr_pages - 1].len == offset);
1795}
1796
Jens Axboe9c55e012007-11-06 23:30:13 -08001797/*
1798 * Fill page/offset/length into spd, if it can hold more pages.
1799 */
David S. Millera108d5f2012-04-23 23:06:11 -04001800static bool spd_fill_page(struct splice_pipe_desc *spd,
1801 struct pipe_inode_info *pipe, struct page *page,
1802 unsigned int *len, unsigned int offset,
Eric Dumazet18aafc62013-01-11 14:46:37 +00001803 bool linear,
David S. Millera108d5f2012-04-23 23:06:11 -04001804 struct sock *sk)
Jens Axboe9c55e012007-11-06 23:30:13 -08001805{
Eric Dumazet41c73a02012-04-22 12:26:16 +00001806 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
David S. Millera108d5f2012-04-23 23:06:11 -04001807 return true;
Jens Axboe9c55e012007-11-06 23:30:13 -08001808
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08001809 if (linear) {
Eric Dumazet18aafc62013-01-11 14:46:37 +00001810 page = linear_to_page(page, len, &offset, sk);
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08001811 if (!page)
David S. Millera108d5f2012-04-23 23:06:11 -04001812 return true;
Eric Dumazet41c73a02012-04-22 12:26:16 +00001813 }
1814 if (spd_can_coalesce(spd, page, offset)) {
1815 spd->partial[spd->nr_pages - 1].len += *len;
David S. Millera108d5f2012-04-23 23:06:11 -04001816 return false;
Eric Dumazet41c73a02012-04-22 12:26:16 +00001817 }
1818 get_page(page);
Jens Axboe9c55e012007-11-06 23:30:13 -08001819 spd->pages[spd->nr_pages] = page;
Jarek Poplawski4fb66992009-02-01 00:41:42 -08001820 spd->partial[spd->nr_pages].len = *len;
Jens Axboe9c55e012007-11-06 23:30:13 -08001821 spd->partial[spd->nr_pages].offset = offset;
Jens Axboe9c55e012007-11-06 23:30:13 -08001822 spd->nr_pages++;
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08001823
David S. Millera108d5f2012-04-23 23:06:11 -04001824 return false;
Jens Axboe9c55e012007-11-06 23:30:13 -08001825}
1826
David S. Millera108d5f2012-04-23 23:06:11 -04001827static bool __splice_segment(struct page *page, unsigned int poff,
1828 unsigned int plen, unsigned int *off,
Eric Dumazet18aafc62013-01-11 14:46:37 +00001829 unsigned int *len,
Eric Dumazetd7ccf7c2012-04-23 23:35:04 -04001830 struct splice_pipe_desc *spd, bool linear,
David S. Millera108d5f2012-04-23 23:06:11 -04001831 struct sock *sk,
1832 struct pipe_inode_info *pipe)
Octavian Purdila2870c432008-07-15 00:49:11 -07001833{
1834 if (!*len)
David S. Millera108d5f2012-04-23 23:06:11 -04001835 return true;
Octavian Purdila2870c432008-07-15 00:49:11 -07001836
1837 /* skip this segment if already processed */
1838 if (*off >= plen) {
1839 *off -= plen;
David S. Millera108d5f2012-04-23 23:06:11 -04001840 return false;
Octavian Purdiladb43a282008-06-27 17:27:21 -07001841 }
Jens Axboe9c55e012007-11-06 23:30:13 -08001842
Octavian Purdila2870c432008-07-15 00:49:11 -07001843 /* ignore any bits we already processed */
Eric Dumazet9ca1b222013-01-05 21:31:18 +00001844 poff += *off;
1845 plen -= *off;
1846 *off = 0;
Octavian Purdila2870c432008-07-15 00:49:11 -07001847
Eric Dumazet18aafc62013-01-11 14:46:37 +00001848 do {
1849 unsigned int flen = min(*len, plen);
Octavian Purdila2870c432008-07-15 00:49:11 -07001850
Eric Dumazet18aafc62013-01-11 14:46:37 +00001851 if (spd_fill_page(spd, pipe, page, &flen, poff,
1852 linear, sk))
1853 return true;
1854 poff += flen;
1855 plen -= flen;
1856 *len -= flen;
1857 } while (*len && plen);
Octavian Purdila2870c432008-07-15 00:49:11 -07001858
David S. Millera108d5f2012-04-23 23:06:11 -04001859 return false;
Octavian Purdila2870c432008-07-15 00:49:11 -07001860}
1861
1862/*
David S. Millera108d5f2012-04-23 23:06:11 -04001863 * Map linear and fragment data from the skb to spd. It reports true if the
Octavian Purdila2870c432008-07-15 00:49:11 -07001864 * pipe is full or if we already spliced the requested length.
1865 */
David S. Millera108d5f2012-04-23 23:06:11 -04001866static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1867 unsigned int *offset, unsigned int *len,
1868 struct splice_pipe_desc *spd, struct sock *sk)
Octavian Purdila2870c432008-07-15 00:49:11 -07001869{
1870 int seg;
1871
Eric Dumazet1d0c0b32012-04-27 02:10:03 +00001872 /* map the linear part :
Alexander Duyck2996d312012-05-02 18:18:42 +00001873 * If skb->head_frag is set, this 'linear' part is backed by a
1874 * fragment, and if the head is not shared with any clones then
1875 * we can avoid a copy since we own the head portion of this page.
Jens Axboe9c55e012007-11-06 23:30:13 -08001876 */
Octavian Purdila2870c432008-07-15 00:49:11 -07001877 if (__splice_segment(virt_to_page(skb->data),
1878 (unsigned long) skb->data & (PAGE_SIZE - 1),
1879 skb_headlen(skb),
Eric Dumazet18aafc62013-01-11 14:46:37 +00001880 offset, len, spd,
Alexander Duyck3a7c1ee42012-05-03 01:09:42 +00001881 skb_head_is_locked(skb),
Eric Dumazet1d0c0b32012-04-27 02:10:03 +00001882 sk, pipe))
David S. Millera108d5f2012-04-23 23:06:11 -04001883 return true;
Jens Axboe9c55e012007-11-06 23:30:13 -08001884
1885 /*
1886 * then map the fragments
1887 */
Jens Axboe9c55e012007-11-06 23:30:13 -08001888 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1889 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1890
Ian Campbellea2ab692011-08-22 23:44:58 +00001891 if (__splice_segment(skb_frag_page(f),
Eric Dumazet9e903e02011-10-18 21:00:24 +00001892 f->page_offset, skb_frag_size(f),
Eric Dumazet18aafc62013-01-11 14:46:37 +00001893 offset, len, spd, false, sk, pipe))
David S. Millera108d5f2012-04-23 23:06:11 -04001894 return true;
Jens Axboe9c55e012007-11-06 23:30:13 -08001895 }
1896
David S. Millera108d5f2012-04-23 23:06:11 -04001897 return false;
Jens Axboe9c55e012007-11-06 23:30:13 -08001898}
1899
1900/*
1901 * Map data from the skb to a pipe. Should handle the linear part,
1902 * the fragments, and the frag list. It does NOT handle frag lists within
1903 * the frag list, if such a thing exists. We'd probably need to recurse to
1904 * handle that cleanly.
1905 */
Jarek Poplawski8b9d3722009-01-19 17:03:56 -08001906int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
Jens Axboe9c55e012007-11-06 23:30:13 -08001907 struct pipe_inode_info *pipe, unsigned int tlen,
1908 unsigned int flags)
1909{
Eric Dumazet41c73a02012-04-22 12:26:16 +00001910 struct partial_page partial[MAX_SKB_FRAGS];
1911 struct page *pages[MAX_SKB_FRAGS];
Jens Axboe9c55e012007-11-06 23:30:13 -08001912 struct splice_pipe_desc spd = {
1913 .pages = pages,
1914 .partial = partial,
Eric Dumazet047fe362012-06-12 15:24:40 +02001915 .nr_pages_max = MAX_SKB_FRAGS,
Jens Axboe9c55e012007-11-06 23:30:13 -08001916 .flags = flags,
Miklos Szeredi28a625c2014-01-22 19:36:57 +01001917 .ops = &nosteal_pipe_buf_ops,
Jens Axboe9c55e012007-11-06 23:30:13 -08001918 .spd_release = sock_spd_release,
1919 };
David S. Millerfbb398a2009-06-09 00:18:59 -07001920 struct sk_buff *frag_iter;
Jarek Poplawski7a67e562009-04-30 05:41:19 -07001921 struct sock *sk = skb->sk;
Jens Axboe35f3d142010-05-20 10:43:18 +02001922 int ret = 0;
1923
Jens Axboe9c55e012007-11-06 23:30:13 -08001924 /*
1925 * __skb_splice_bits() only fails if the output has no room left,
1926 * so no point in going over the frag_list for the error case.
1927 */
Jens Axboe35f3d142010-05-20 10:43:18 +02001928 if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
Jens Axboe9c55e012007-11-06 23:30:13 -08001929 goto done;
1930 else if (!tlen)
1931 goto done;
1932
1933 /*
1934 * now see if we have a frag_list to map
1935 */
David S. Millerfbb398a2009-06-09 00:18:59 -07001936 skb_walk_frags(skb, frag_iter) {
1937 if (!tlen)
1938 break;
Jens Axboe35f3d142010-05-20 10:43:18 +02001939 if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
David S. Millerfbb398a2009-06-09 00:18:59 -07001940 break;
Jens Axboe9c55e012007-11-06 23:30:13 -08001941 }
1942
1943done:
Jens Axboe9c55e012007-11-06 23:30:13 -08001944 if (spd.nr_pages) {
Jens Axboe9c55e012007-11-06 23:30:13 -08001945 /*
1946 * Drop the socket lock, otherwise we have reverse
1947 * locking dependencies between sk_lock and i_mutex
1948 * here as compared to sendfile(). We enter here
1949 * with the socket lock held, and splice_to_pipe() will
1950 * grab the pipe inode lock. For sendfile() emulation,
1951 * we call into ->sendpage() with the i_mutex lock held
1952 * and networking will grab the socket lock.
1953 */
Octavian Purdila293ad602008-06-04 15:45:58 -07001954 release_sock(sk);
Jens Axboe9c55e012007-11-06 23:30:13 -08001955 ret = splice_to_pipe(pipe, &spd);
Octavian Purdila293ad602008-06-04 15:45:58 -07001956 lock_sock(sk);
Jens Axboe9c55e012007-11-06 23:30:13 -08001957 }
1958
Jens Axboe35f3d142010-05-20 10:43:18 +02001959 return ret;
Jens Axboe9c55e012007-11-06 23:30:13 -08001960}
1961
Herbert Xu357b40a2005-04-19 22:30:14 -07001962/**
1963 * skb_store_bits - store bits from kernel buffer to skb
1964 * @skb: destination buffer
1965 * @offset: offset in destination
1966 * @from: source buffer
1967 * @len: number of bytes to copy
1968 *
1969 * Copy the specified number of bytes from the source buffer to the
1970 * destination skb. This function handles all the messy bits of
1971 * traversing fragment lists and such.
1972 */
1973
Stephen Hemminger0c6fcc82007-04-20 16:40:01 -07001974int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
Herbert Xu357b40a2005-04-19 22:30:14 -07001975{
David S. Miller1a028e52007-04-27 15:21:23 -07001976 int start = skb_headlen(skb);
David S. Millerfbb398a2009-06-09 00:18:59 -07001977 struct sk_buff *frag_iter;
1978 int i, copy;
Herbert Xu357b40a2005-04-19 22:30:14 -07001979
1980 if (offset > (int)skb->len - len)
1981 goto fault;
1982
David S. Miller1a028e52007-04-27 15:21:23 -07001983 if ((copy = start - offset) > 0) {
Herbert Xu357b40a2005-04-19 22:30:14 -07001984 if (copy > len)
1985 copy = len;
Arnaldo Carvalho de Melo27d7ff42007-03-31 11:55:19 -03001986 skb_copy_to_linear_data_offset(skb, offset, from, copy);
Herbert Xu357b40a2005-04-19 22:30:14 -07001987 if ((len -= copy) == 0)
1988 return 0;
1989 offset += copy;
1990 from += copy;
1991 }
1992
1993 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1994 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
David S. Miller1a028e52007-04-27 15:21:23 -07001995 int end;
Herbert Xu357b40a2005-04-19 22:30:14 -07001996
Ilpo Järvinen547b7922008-07-25 21:43:18 -07001997 WARN_ON(start > offset + len);
David S. Miller1a028e52007-04-27 15:21:23 -07001998
Eric Dumazet9e903e02011-10-18 21:00:24 +00001999 end = start + skb_frag_size(frag);
Herbert Xu357b40a2005-04-19 22:30:14 -07002000 if ((copy = end - offset) > 0) {
2001 u8 *vaddr;
2002
2003 if (copy > len)
2004 copy = len;
2005
Eric Dumazet51c56b02012-04-05 11:35:15 +02002006 vaddr = kmap_atomic(skb_frag_page(frag));
David S. Miller1a028e52007-04-27 15:21:23 -07002007 memcpy(vaddr + frag->page_offset + offset - start,
2008 from, copy);
Eric Dumazet51c56b02012-04-05 11:35:15 +02002009 kunmap_atomic(vaddr);
Herbert Xu357b40a2005-04-19 22:30:14 -07002010
2011 if ((len -= copy) == 0)
2012 return 0;
2013 offset += copy;
2014 from += copy;
2015 }
David S. Miller1a028e52007-04-27 15:21:23 -07002016 start = end;
Herbert Xu357b40a2005-04-19 22:30:14 -07002017 }
2018
David S. Millerfbb398a2009-06-09 00:18:59 -07002019 skb_walk_frags(skb, frag_iter) {
2020 int end;
Herbert Xu357b40a2005-04-19 22:30:14 -07002021
David S. Millerfbb398a2009-06-09 00:18:59 -07002022 WARN_ON(start > offset + len);
Herbert Xu357b40a2005-04-19 22:30:14 -07002023
David S. Millerfbb398a2009-06-09 00:18:59 -07002024 end = start + frag_iter->len;
2025 if ((copy = end - offset) > 0) {
2026 if (copy > len)
2027 copy = len;
2028 if (skb_store_bits(frag_iter, offset - start,
2029 from, copy))
2030 goto fault;
2031 if ((len -= copy) == 0)
2032 return 0;
2033 offset += copy;
2034 from += copy;
Herbert Xu357b40a2005-04-19 22:30:14 -07002035 }
David S. Millerfbb398a2009-06-09 00:18:59 -07002036 start = end;
Herbert Xu357b40a2005-04-19 22:30:14 -07002037 }
2038 if (!len)
2039 return 0;
2040
2041fault:
2042 return -EFAULT;
2043}
Herbert Xu357b40a2005-04-19 22:30:14 -07002044EXPORT_SYMBOL(skb_store_bits);
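
/* Illustrative sketch: patching a 16-bit field at a known offset, even if
 * it lives in a page frag. The caller is assumed to have made the data
 * writable first (e.g. via skb_cow() or pskb_expand_head()), since
 * skb_store_bits() writes into the frags in place.
 */
static int __maybe_unused example_patch_field(struct sk_buff *skb, int offset,
					      __be16 val)
{
	return skb_store_bits(skb, offset, &val, sizeof(val));
}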
2045
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046/* Checksum skb data. */
Daniel Borkmann2817a332013-10-30 11:50:51 +01002047__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2048 __wsum csum, const struct skb_checksum_ops *ops)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049{
David S. Miller1a028e52007-04-27 15:21:23 -07002050 int start = skb_headlen(skb);
2051 int i, copy = start - offset;
David S. Millerfbb398a2009-06-09 00:18:59 -07002052 struct sk_buff *frag_iter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 int pos = 0;
2054
2055 /* Checksum header. */
2056 if (copy > 0) {
2057 if (copy > len)
2058 copy = len;
Daniel Borkmann2817a332013-10-30 11:50:51 +01002059 csum = ops->update(skb->data + offset, copy, csum);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 if ((len -= copy) == 0)
2061 return csum;
2062 offset += copy;
2063 pos = copy;
2064 }
2065
2066 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
David S. Miller1a028e52007-04-27 15:21:23 -07002067 int end;
Eric Dumazet51c56b02012-04-05 11:35:15 +02002068 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002070 WARN_ON(start > offset + len);
David S. Miller1a028e52007-04-27 15:21:23 -07002071
Eric Dumazet51c56b02012-04-05 11:35:15 +02002072 end = start + skb_frag_size(frag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 if ((copy = end - offset) > 0) {
Al Viro44bb9362006-11-14 21:36:14 -08002074 __wsum csum2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 u8 *vaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076
2077 if (copy > len)
2078 copy = len;
Eric Dumazet51c56b02012-04-05 11:35:15 +02002079 vaddr = kmap_atomic(skb_frag_page(frag));
Daniel Borkmann2817a332013-10-30 11:50:51 +01002080 csum2 = ops->update(vaddr + frag->page_offset +
2081 offset - start, copy, 0);
Eric Dumazet51c56b02012-04-05 11:35:15 +02002082 kunmap_atomic(vaddr);
Daniel Borkmann2817a332013-10-30 11:50:51 +01002083 csum = ops->combine(csum, csum2, pos, copy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 if (!(len -= copy))
2085 return csum;
2086 offset += copy;
2087 pos += copy;
2088 }
David S. Miller1a028e52007-04-27 15:21:23 -07002089 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 }
2091
David S. Millerfbb398a2009-06-09 00:18:59 -07002092 skb_walk_frags(skb, frag_iter) {
2093 int end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094
David S. Millerfbb398a2009-06-09 00:18:59 -07002095 WARN_ON(start > offset + len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096
David S. Millerfbb398a2009-06-09 00:18:59 -07002097 end = start + frag_iter->len;
2098 if ((copy = end - offset) > 0) {
2099 __wsum csum2;
2100 if (copy > len)
2101 copy = len;
Daniel Borkmann2817a332013-10-30 11:50:51 +01002102 csum2 = __skb_checksum(frag_iter, offset - start,
2103 copy, 0, ops);
2104 csum = ops->combine(csum, csum2, pos, copy);
David S. Millerfbb398a2009-06-09 00:18:59 -07002105 if ((len -= copy) == 0)
2106 return csum;
2107 offset += copy;
2108 pos += copy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 }
David S. Millerfbb398a2009-06-09 00:18:59 -07002110 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 }
Kris Katterjohn09a62662006-01-08 22:24:28 -08002112 BUG_ON(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
2114 return csum;
2115}
Daniel Borkmann2817a332013-10-30 11:50:51 +01002116EXPORT_SYMBOL(__skb_checksum);
2117
2118__wsum skb_checksum(const struct sk_buff *skb, int offset,
2119 int len, __wsum csum)
2120{
2121 const struct skb_checksum_ops ops = {
Daniel Borkmanncea80ea2013-11-04 17:10:25 +01002122 .update = csum_partial_ext,
Daniel Borkmann2817a332013-10-30 11:50:51 +01002123 .combine = csum_block_add_ext,
2124 };
2125
2126 return __skb_checksum(skb, offset, len, csum, &ops);
2127}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002128EXPORT_SYMBOL(skb_checksum);
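
/* Illustrative sketch: checksumming a whole datagram and folding the
 * 32-bit partial sum to its final 16-bit form, roughly what an ICMP
 * reply path needs to do.
 */
static __sum16 __maybe_unused example_csum_fold(const struct sk_buff *skb,
						int offset)
{
	__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

	return csum_fold(csum);
}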
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129
2130/* Both of the above in one bottle. */
2131
Al Viro81d77662006-11-14 21:37:33 -08002132__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2133 u8 *to, int len, __wsum csum)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134{
David S. Miller1a028e52007-04-27 15:21:23 -07002135 int start = skb_headlen(skb);
2136 int i, copy = start - offset;
David S. Millerfbb398a2009-06-09 00:18:59 -07002137 struct sk_buff *frag_iter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 int pos = 0;
2139
2140 /* Copy header. */
2141 if (copy > 0) {
2142 if (copy > len)
2143 copy = len;
2144 csum = csum_partial_copy_nocheck(skb->data + offset, to,
2145 copy, csum);
2146 if ((len -= copy) == 0)
2147 return csum;
2148 offset += copy;
2149 to += copy;
2150 pos = copy;
2151 }
2152
2153 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
David S. Miller1a028e52007-04-27 15:21:23 -07002154 int end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002156 WARN_ON(start > offset + len);
David S. Miller1a028e52007-04-27 15:21:23 -07002157
Eric Dumazet9e903e02011-10-18 21:00:24 +00002158 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 if ((copy = end - offset) > 0) {
Al Viro50842052006-11-14 21:36:34 -08002160 __wsum csum2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 u8 *vaddr;
2162 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2163
2164 if (copy > len)
2165 copy = len;
Eric Dumazet51c56b02012-04-05 11:35:15 +02002166 vaddr = kmap_atomic(skb_frag_page(frag));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 csum2 = csum_partial_copy_nocheck(vaddr +
David S. Miller1a028e52007-04-27 15:21:23 -07002168 frag->page_offset +
2169 offset - start, to,
2170 copy, 0);
Eric Dumazet51c56b02012-04-05 11:35:15 +02002171 kunmap_atomic(vaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 csum = csum_block_add(csum, csum2, pos);
2173 if (!(len -= copy))
2174 return csum;
2175 offset += copy;
2176 to += copy;
2177 pos += copy;
2178 }
David S. Miller1a028e52007-04-27 15:21:23 -07002179 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 }
2181
David S. Millerfbb398a2009-06-09 00:18:59 -07002182 skb_walk_frags(skb, frag_iter) {
2183 __wsum csum2;
2184 int end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185
David S. Millerfbb398a2009-06-09 00:18:59 -07002186 WARN_ON(start > offset + len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
David S. Millerfbb398a2009-06-09 00:18:59 -07002188 end = start + frag_iter->len;
2189 if ((copy = end - offset) > 0) {
2190 if (copy > len)
2191 copy = len;
2192 csum2 = skb_copy_and_csum_bits(frag_iter,
2193 offset - start,
2194 to, copy, 0);
2195 csum = csum_block_add(csum, csum2, pos);
2196 if ((len -= copy) == 0)
2197 return csum;
2198 offset += copy;
2199 to += copy;
2200 pos += copy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 }
David S. Millerfbb398a2009-06-09 00:18:59 -07002202 start = end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 }
Kris Katterjohn09a62662006-01-08 22:24:28 -08002204 BUG_ON(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 return csum;
2206}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002207EXPORT_SYMBOL(skb_copy_and_csum_bits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208
Thomas Grafaf2806f2013-12-13 15:22:17 +01002209 /**
2210 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2211 * @from: source buffer
2212 *
2213 * Calculates the amount of linear headroom needed in the 'to' skb passed
2214 * into skb_zerocopy().
2215 */
2216unsigned int
2217skb_zerocopy_headlen(const struct sk_buff *from)
2218{
2219 unsigned int hlen = 0;
2220
2221 if (!from->head_frag ||
2222 skb_headlen(from) < L1_CACHE_BYTES ||
2223 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2224 hlen = skb_headlen(from);
2225
2226 if (skb_has_frag_list(from))
2227 hlen = from->len;
2228
2229 return hlen;
2230}
2231EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2232
2233/**
2234 * skb_zerocopy - Zero copy skb to skb
2235 * @to: destination buffer
Masanari Iida7fceb4d2014-01-29 01:05:28 +09002236 * @from: source buffer
Thomas Grafaf2806f2013-12-13 15:22:17 +01002237 * @len: number of bytes to copy from source buffer
2238 * @hlen: size of linear headroom in destination buffer
2239 *
2240 * Copies up to `len` bytes from `from` to `to` by creating references
2241 * to the frags in the source buffer.
2242 *
2243 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2244 * headroom in the `to` buffer.
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002245 *
2246 * Return value:
2247 * 0: everything is OK
2248 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
2249 * -EFAULT: skb_copy_bits() found some problem with skb geometry
Thomas Grafaf2806f2013-12-13 15:22:17 +01002250 */
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002251int
2252skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
Thomas Grafaf2806f2013-12-13 15:22:17 +01002253{
2254 int i, j = 0;
2255 int plen = 0; /* length of skb->head fragment */
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002256 int ret;
Thomas Grafaf2806f2013-12-13 15:22:17 +01002257 struct page *page;
2258 unsigned int offset;
2259
2260 BUG_ON(!from->head_frag && !hlen);
2261
2262 /* dont bother with small payloads */
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002263 if (len <= skb_tailroom(to))
2264 return skb_copy_bits(from, 0, skb_put(to, len), len);
Thomas Grafaf2806f2013-12-13 15:22:17 +01002265
2266 if (hlen) {
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002267 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2268 if (unlikely(ret))
2269 return ret;
Thomas Grafaf2806f2013-12-13 15:22:17 +01002270 len -= hlen;
2271 } else {
2272 plen = min_t(int, skb_headlen(from), len);
2273 if (plen) {
2274 page = virt_to_head_page(from->head);
2275 offset = from->data - (unsigned char *)page_address(page);
2276 __skb_fill_page_desc(to, 0, page, offset, plen);
2277 get_page(page);
2278 j = 1;
2279 len -= plen;
2280 }
2281 }
2282
2283 to->truesize += len + plen;
2284 to->len += len + plen;
2285 to->data_len += len + plen;
2286
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002287 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2288 skb_tx_error(from);
2289 return -ENOMEM;
2290 }
2291
Thomas Grafaf2806f2013-12-13 15:22:17 +01002292 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2293 if (!len)
2294 break;
2295 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2296 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2297 len -= skb_shinfo(to)->frags[j].size;
2298 skb_frag_ref(to, j);
2299 j++;
2300 }
2301 skb_shinfo(to)->nr_frags = j;
Zoltan Kiss36d5fe62014-03-26 22:37:45 +00002302
2303 return 0;
Thomas Grafaf2806f2013-12-13 15:22:17 +01002304}
2305EXPORT_SYMBOL_GPL(skb_zerocopy);
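
/* Illustrative sketch: pairing the two helpers above roughly the way
 * consumers such as nfnetlink_queue do -- size the linear area with
 * skb_zerocopy_headlen(), then share the source frags by reference.
 */
static struct sk_buff * __maybe_unused example_zerocopy(struct sk_buff *from,
							gfp_t gfp)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to = alloc_skb(hlen, gfp);

	if (!to)
		return NULL;
	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}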
2306
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2308{
Al Virod3bc23e2006-11-14 21:24:49 -08002309 __wsum csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 long csstart;
2311
Patrick McHardy84fa7932006-08-29 16:44:56 -07002312 if (skb->ip_summed == CHECKSUM_PARTIAL)
Michał Mirosław55508d62010-12-14 15:24:08 +00002313 csstart = skb_checksum_start_offset(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 else
2315 csstart = skb_headlen(skb);
2316
Kris Katterjohn09a62662006-01-08 22:24:28 -08002317 BUG_ON(csstart > skb_headlen(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002319 skb_copy_from_linear_data(skb, to, csstart);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320
2321 csum = 0;
2322 if (csstart != skb->len)
2323 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2324 skb->len - csstart, 0);
2325
Patrick McHardy84fa7932006-08-29 16:44:56 -07002326 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Al Viroff1dcad2006-11-20 18:07:29 -08002327 long csstuff = csstart + skb->csum_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328
Al Virod3bc23e2006-11-14 21:24:49 -08002329 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330 }
2331}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002332EXPORT_SYMBOL(skb_copy_and_csum_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333
2334/**
2335 * skb_dequeue - remove from the head of the queue
2336 * @list: list to dequeue from
2337 *
2338 * Remove the head of the list. The list lock is taken so the function
2339 * may be used safely with other locking list functions. The head item is
2340 * returned or %NULL if the list is empty.
2341 */
2342
2343struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2344{
2345 unsigned long flags;
2346 struct sk_buff *result;
2347
2348 spin_lock_irqsave(&list->lock, flags);
2349 result = __skb_dequeue(list);
2350 spin_unlock_irqrestore(&list->lock, flags);
2351 return result;
2352}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002353EXPORT_SYMBOL(skb_dequeue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354
2355/**
2356 * skb_dequeue_tail - remove from the tail of the queue
2357 * @list: list to dequeue from
2358 *
2359 * Remove the tail of the list. The list lock is taken so the function
2360 * may be used safely with other locking list functions. The tail item is
2361 * returned or %NULL if the list is empty.
2362 */
2363struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2364{
2365 unsigned long flags;
2366 struct sk_buff *result;
2367
2368 spin_lock_irqsave(&list->lock, flags);
2369 result = __skb_dequeue_tail(list);
2370 spin_unlock_irqrestore(&list->lock, flags);
2371 return result;
2372}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002373EXPORT_SYMBOL(skb_dequeue_tail);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374
2375/**
2376 * skb_queue_purge - empty a list
2377 * @list: list to empty
2378 *
2379 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2380 * the list and one reference dropped. This function takes the list
2381 * lock and is atomic with respect to other list locking functions.
2382 */
2383void skb_queue_purge(struct sk_buff_head *list)
2384{
2385 struct sk_buff *skb;
2386 while ((skb = skb_dequeue(list)) != NULL)
2387 kfree_skb(skb);
2388}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002389EXPORT_SYMBOL(skb_queue_purge);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
2391/**
2392 * skb_queue_head - queue a buffer at the list head
2393 * @list: list to use
2394 * @newsk: buffer to queue
2395 *
2396 * Queue a buffer at the start of the list. This function takes the
2397 *	list lock and can be used safely with other locking &sk_buff
2398 *	functions.
2399 *
2400 * A buffer cannot be placed on two lists at the same time.
2401 */
2402void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2403{
2404 unsigned long flags;
2405
2406 spin_lock_irqsave(&list->lock, flags);
2407 __skb_queue_head(list, newsk);
2408 spin_unlock_irqrestore(&list->lock, flags);
2409}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002410EXPORT_SYMBOL(skb_queue_head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411
2412/**
2413 * skb_queue_tail - queue a buffer at the list tail
2414 * @list: list to use
2415 * @newsk: buffer to queue
2416 *
2417 * Queue a buffer at the tail of the list. This function takes the
2418 *	list lock and can be used safely with other locking &sk_buff
2419 *	functions.
2420 *
2421 * A buffer cannot be placed on two lists at the same time.
2422 */
2423void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2424{
2425 unsigned long flags;
2426
2427 spin_lock_irqsave(&list->lock, flags);
2428 __skb_queue_tail(list, newsk);
2429 spin_unlock_irqrestore(&list->lock, flags);
2430}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002431EXPORT_SYMBOL(skb_queue_tail);
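
/* Illustrative sketch: a producer/consumer pair built on the locked queue
 * helpers above. Because each helper takes list->lock with IRQs disabled,
 * a hypothetical driver can enqueue from its interrupt handler and drain
 * from process context with no extra locking of its own.
 */
static void __maybe_unused example_rx_enqueue(struct sk_buff_head *q,
					      struct sk_buff *skb)
{
	skb_queue_tail(q, skb);			/* safe from hard IRQ */
}

static void __maybe_unused example_rx_drain(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q)) != NULL)
		consume_skb(skb);		/* a real driver would process it */
}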
David S. Miller8728b832005-08-09 19:25:21 -07002432
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433/**
2434 * skb_unlink - remove a buffer from a list
2435 * @skb: buffer to remove
David S. Miller8728b832005-08-09 19:25:21 -07002436 * @list: list to use
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 *
David S. Miller8728b832005-08-09 19:25:21 -07002438 * Remove a packet from a list. The list locks are taken and this
2439 *	function is atomic with respect to other list locked calls.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 *
David S. Miller8728b832005-08-09 19:25:21 -07002441 * You must know what list the SKB is on.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 */
David S. Miller8728b832005-08-09 19:25:21 -07002443void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444{
David S. Miller8728b832005-08-09 19:25:21 -07002445 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446
David S. Miller8728b832005-08-09 19:25:21 -07002447 spin_lock_irqsave(&list->lock, flags);
2448 __skb_unlink(skb, list);
2449 spin_unlock_irqrestore(&list->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002451EXPORT_SYMBOL(skb_unlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453/**
2454 * skb_append - append a buffer
2455 * @old: buffer to insert after
2456 * @newsk: buffer to insert
David S. Miller8728b832005-08-09 19:25:21 -07002457 * @list: list to use
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 *
2459 * Place a packet after a given packet in a list. The list locks are taken
2460 * and this function is atomic with respect to other list locked calls.
2461 * A buffer cannot be placed on two lists at the same time.
2462 */
David S. Miller8728b832005-08-09 19:25:21 -07002463void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464{
2465 unsigned long flags;
2466
David S. Miller8728b832005-08-09 19:25:21 -07002467 spin_lock_irqsave(&list->lock, flags);
Gerrit Renker7de6c032008-04-14 00:05:09 -07002468 __skb_queue_after(list, old, newsk);
David S. Miller8728b832005-08-09 19:25:21 -07002469 spin_unlock_irqrestore(&list->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002471EXPORT_SYMBOL(skb_append);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
2473/**
2474 * skb_insert - insert a buffer
2475 * @old: buffer to insert before
2476 * @newsk: buffer to insert
David S. Miller8728b832005-08-09 19:25:21 -07002477 * @list: list to use
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 *
David S. Miller8728b832005-08-09 19:25:21 -07002479 * Place a packet before a given packet in a list. The list locks are
2480 * taken and this function is atomic with respect to other list locked
2481 * calls.
2482 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 * A buffer cannot be placed on two lists at the same time.
2484 */
David S. Miller8728b832005-08-09 19:25:21 -07002485void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486{
2487 unsigned long flags;
2488
David S. Miller8728b832005-08-09 19:25:21 -07002489 spin_lock_irqsave(&list->lock, flags);
2490 __skb_insert(newsk, old->prev, old, list);
2491 spin_unlock_irqrestore(&list->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002493EXPORT_SYMBOL(skb_insert);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495static inline void skb_split_inside_header(struct sk_buff *skb,
2496 struct sk_buff* skb1,
2497 const u32 len, const int pos)
2498{
2499 int i;
2500
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002501 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2502 pos - len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 /* And move data appendix as is. */
2504 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2505 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2506
2507 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2508 skb_shinfo(skb)->nr_frags = 0;
2509 skb1->data_len = skb->data_len;
2510 skb1->len += skb1->data_len;
2511 skb->data_len = 0;
2512 skb->len = len;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002513 skb_set_tail_pointer(skb, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514}
2515
2516static inline void skb_split_no_header(struct sk_buff *skb,
2517 struct sk_buff* skb1,
2518 const u32 len, int pos)
2519{
2520 int i, k = 0;
2521 const int nfrags = skb_shinfo(skb)->nr_frags;
2522
2523 skb_shinfo(skb)->nr_frags = 0;
2524 skb1->len = skb1->data_len = skb->len - len;
2525 skb->len = len;
2526 skb->data_len = len - pos;
2527
2528 for (i = 0; i < nfrags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00002529 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530
2531 if (pos + size > len) {
2532 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2533
2534 if (pos < len) {
2535 /* Split frag.
2536 * We have two variants in this case:
2537			 * 1. Move the whole frag to the second
2538 * part, if it is possible. F.e.
2539 * this approach is mandatory for TUX,
2540 * where splitting is expensive.
2541			 * 2. Split accurately. This is what we do.
2542 */
Ian Campbellea2ab692011-08-22 23:44:58 +00002543 skb_frag_ref(skb, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 skb_shinfo(skb1)->frags[0].page_offset += len - pos;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002545 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2546 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 skb_shinfo(skb)->nr_frags++;
2548 }
2549 k++;
2550 } else
2551 skb_shinfo(skb)->nr_frags++;
2552 pos += size;
2553 }
2554 skb_shinfo(skb1)->nr_frags = k;
2555}
2556
2557/**
2558 * skb_split - Split fragmented skb to two parts at length len.
2559 * @skb: the buffer to split
2560 * @skb1: the buffer to receive the second part
2561 * @len: new length for skb
2562 */
2563void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2564{
2565 int pos = skb_headlen(skb);
2566
Amerigo Wang68534c62013-02-19 22:51:30 +00002567 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 if (len < pos) /* Split line is inside header. */
2569 skb_split_inside_header(skb, skb1, len, pos);
2570 else /* Second chunk has no header, nothing to copy. */
2571 skb_split_no_header(skb, skb1, len, pos);
2572}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002573EXPORT_SYMBOL(skb_split);
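
/* Illustrative sketch: splitting an over-long skb at @len, in the spirit
 * of TCP's tso_fragment(). The second buffer only needs tailroom for the
 * part of the linear header that moves over; a real TCP path would also
 * reserve protocol headroom in the new buffer.
 */
static struct sk_buff * __maybe_unused example_split_at(struct sk_buff *skb,
							u32 len, gfp_t gfp)
{
	unsigned int nsize = 0;
	struct sk_buff *rest;

	if (skb_headlen(skb) > len)
		nsize = skb_headlen(skb) - len;	/* linear bytes moving over */
	rest = alloc_skb(nsize, gfp);
	if (!rest)
		return NULL;
	skb_split(skb, rest, len);	/* skb keeps bytes [0, len) */
	return rest;
}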
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574
Ilpo Järvinen9f782db2008-11-25 13:57:01 -08002575/* Shifting from/to a cloned skb is a no-go.
2576 *
2577 * Caller cannot keep skb_shinfo related pointers past calling here!
2578 */
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002579static int skb_prepare_for_shift(struct sk_buff *skb)
2580{
Ilpo Järvinen0ace2852008-11-24 21:30:21 -08002581 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002582}
2583
2584/**
2585 * skb_shift - Shifts paged data partially from skb to another
2586 * @tgt: buffer into which tail data gets added
2587 * @skb: buffer from which the paged data comes from
2588 * @shiftlen: shift up to this many bytes
2589 *
2590 * Attempts to shift up to shiftlen worth of bytes, which may be less than
Feng King20e994a2011-11-21 01:47:11 +00002591 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002592 * It's up to the caller to free skb if everything was shifted.
2593 *
2594 * If @tgt runs out of frags, the whole operation is aborted.
2595 *
2596 * The skb can contain nothing but paged data, while tgt is allowed
2597 * to have non-paged data as well.
2598 *
2599 * TODO: full sized shift could be optimized but that would need
2600 * specialized skb free'er to handle frags without up-to-date nr_frags.
2601 */
2602int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2603{
2604 int from, to, merge, todo;
2605 struct skb_frag_struct *fragfrom, *fragto;
2606
2607 BUG_ON(shiftlen > skb->len);
2608 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */
2609
2610 todo = shiftlen;
2611 from = 0;
2612 to = skb_shinfo(tgt)->nr_frags;
2613 fragfrom = &skb_shinfo(skb)->frags[from];
2614
2615 /* Actual merge is delayed until the point when we know we can
2616 * commit all, so that we don't have to undo partial changes
2617 */
2618 if (!to ||
Ian Campbellea2ab692011-08-22 23:44:58 +00002619 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2620 fragfrom->page_offset)) {
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002621 merge = -1;
2622 } else {
2623 merge = to - 1;
2624
Eric Dumazet9e903e02011-10-18 21:00:24 +00002625 todo -= skb_frag_size(fragfrom);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002626 if (todo < 0) {
2627 if (skb_prepare_for_shift(skb) ||
2628 skb_prepare_for_shift(tgt))
2629 return 0;
2630
Ilpo Järvinen9f782db2008-11-25 13:57:01 -08002631 /* All previous frag pointers might be stale! */
2632 fragfrom = &skb_shinfo(skb)->frags[from];
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002633 fragto = &skb_shinfo(tgt)->frags[merge];
2634
Eric Dumazet9e903e02011-10-18 21:00:24 +00002635 skb_frag_size_add(fragto, shiftlen);
2636 skb_frag_size_sub(fragfrom, shiftlen);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002637 fragfrom->page_offset += shiftlen;
2638
2639 goto onlymerged;
2640 }
2641
2642 from++;
2643 }
2644
2645 /* Skip full, not-fitting skb to avoid expensive operations */
2646 if ((shiftlen == skb->len) &&
2647 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2648 return 0;
2649
2650 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2651 return 0;
2652
2653 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2654 if (to == MAX_SKB_FRAGS)
2655 return 0;
2656
2657 fragfrom = &skb_shinfo(skb)->frags[from];
2658 fragto = &skb_shinfo(tgt)->frags[to];
2659
Eric Dumazet9e903e02011-10-18 21:00:24 +00002660 if (todo >= skb_frag_size(fragfrom)) {
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002661 *fragto = *fragfrom;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002662 todo -= skb_frag_size(fragfrom);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002663 from++;
2664 to++;
2665
2666 } else {
Ian Campbellea2ab692011-08-22 23:44:58 +00002667 __skb_frag_ref(fragfrom);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002668 fragto->page = fragfrom->page;
2669 fragto->page_offset = fragfrom->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002670 skb_frag_size_set(fragto, todo);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002671
2672 fragfrom->page_offset += todo;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002673 skb_frag_size_sub(fragfrom, todo);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002674 todo = 0;
2675
2676 to++;
2677 break;
2678 }
2679 }
2680
2681 /* Ready to "commit" this state change to tgt */
2682 skb_shinfo(tgt)->nr_frags = to;
2683
2684 if (merge >= 0) {
2685 fragfrom = &skb_shinfo(skb)->frags[0];
2686 fragto = &skb_shinfo(tgt)->frags[merge];
2687
Eric Dumazet9e903e02011-10-18 21:00:24 +00002688 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
Ian Campbellea2ab692011-08-22 23:44:58 +00002689 __skb_frag_unref(fragfrom);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08002690 }
2691
2692 /* Reposition in the original skb */
2693 to = 0;
2694 while (from < skb_shinfo(skb)->nr_frags)
2695 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2696 skb_shinfo(skb)->nr_frags = to;
2697
2698 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2699
2700onlymerged:
2701	/* Most likely the tgt won't ever need its checksum anymore; skb, on
2702	 * the other hand, might need it if it has to be retransmitted.
2703	 */
2704 tgt->ip_summed = CHECKSUM_PARTIAL;
2705 skb->ip_summed = CHECKSUM_PARTIAL;
2706
2707 /* Yak, is it really working this way? Some helper please? */
2708 skb->len -= shiftlen;
2709 skb->data_len -= shiftlen;
2710 skb->truesize -= shiftlen;
2711 tgt->len += shiftlen;
2712 tgt->data_len += shiftlen;
2713 tgt->truesize += shiftlen;
2714
2715 return shiftlen;
2716}
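
/*
 * Example (editor's sketch): trying to coalesce paged data from skb into
 * the previous buffer, in the spirit of TCP's SACK collapsing code. It
 * assumes skb_headlen(skb) == 0 and len <= skb->len, which skb_shift()
 * requires, and that skb has already been unlinked from its queue.
 */
static bool example_try_coalesce(struct sk_buff *prev, struct sk_buff *skb,
				 int len)
{
	int shifted = skb_shift(prev, skb, len);

	if (!shifted)
		return false;	/* prev ran out of frag slots, nothing moved */
	if (!skb->len)
		kfree_skb(skb);	/* everything moved; source is now empty */
	return true;
}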
2717
Thomas Graf677e90e2005-06-23 20:59:51 -07002718/**
2719 * skb_prepare_seq_read - Prepare a sequential read of skb data
2720 * @skb: the buffer to read
2721 * @from: lower offset of data to be read
2722 * @to: upper offset of data to be read
2723 * @st: state variable
2724 *
2725 * Initializes the specified state variable. Must be called before
2726 * invoking skb_seq_read() for the first time.
2727 */
2728void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2729 unsigned int to, struct skb_seq_state *st)
2730{
2731 st->lower_offset = from;
2732 st->upper_offset = to;
2733 st->root_skb = st->cur_skb = skb;
2734 st->frag_idx = st->stepped_offset = 0;
2735 st->frag_data = NULL;
2736}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002737EXPORT_SYMBOL(skb_prepare_seq_read);
Thomas Graf677e90e2005-06-23 20:59:51 -07002738
2739/**
2740 * skb_seq_read - Sequentially read skb data
2741 * @consumed: number of bytes consumed by the caller so far
2742 * @data: destination pointer for data to be returned
2743 * @st: state variable
2744 *
Mathias Krausebc323832013-11-07 14:18:26 +01002745 * Reads a block of skb data at @consumed relative to the
Thomas Graf677e90e2005-06-23 20:59:51 -07002746 * lower offset specified to skb_prepare_seq_read(). Assigns
Mathias Krausebc323832013-11-07 14:18:26 +01002747 * the head of the data block to @data and returns the length
Thomas Graf677e90e2005-06-23 20:59:51 -07002748 * of the block or 0 if the end of the skb data or the upper
2749 * offset has been reached.
2750 *
2751 * The caller is not required to consume all of the data
Mathias Krausebc323832013-11-07 14:18:26 +01002752 * returned, i.e. @consumed is typically set to the number
Thomas Graf677e90e2005-06-23 20:59:51 -07002753 * of bytes already consumed and the next call to
2754 * skb_seq_read() will return the remaining part of the block.
2755 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002756 * Note 1: The size of each block of data returned can be arbitrary;
Masanari Iidae793c0f2014-09-04 23:44:36 +09002757 * this limitation is the cost of zerocopy sequential
Thomas Graf677e90e2005-06-23 20:59:51 -07002758 * reads of potentially non-linear data.
2759 *
Randy Dunlapbc2cda12008-02-13 15:03:25 -08002760 * Note 2: Fragment lists within fragments are not implemented
Thomas Graf677e90e2005-06-23 20:59:51 -07002761 * at the moment, state->root_skb could be replaced with
2762 * a stack for this purpose.
2763 */
2764unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2765 struct skb_seq_state *st)
2766{
2767 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2768 skb_frag_t *frag;
2769
Wedson Almeida Filhoaeb193e2013-06-23 23:33:48 -07002770 if (unlikely(abs_offset >= st->upper_offset)) {
2771 if (st->frag_data) {
2772 kunmap_atomic(st->frag_data);
2773 st->frag_data = NULL;
2774 }
Thomas Graf677e90e2005-06-23 20:59:51 -07002775 return 0;
Wedson Almeida Filhoaeb193e2013-06-23 23:33:48 -07002776 }
Thomas Graf677e90e2005-06-23 20:59:51 -07002777
2778next_skb:
Herbert Xu95e3b242009-01-29 16:07:52 -08002779 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
Thomas Graf677e90e2005-06-23 20:59:51 -07002780
Thomas Chenault995b3372009-05-18 21:43:27 -07002781 if (abs_offset < block_limit && !st->frag_data) {
Herbert Xu95e3b242009-01-29 16:07:52 -08002782 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
Thomas Graf677e90e2005-06-23 20:59:51 -07002783 return block_limit - abs_offset;
2784 }
2785
2786 if (st->frag_idx == 0 && !st->frag_data)
2787 st->stepped_offset += skb_headlen(st->cur_skb);
2788
2789 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2790 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
Eric Dumazet9e903e02011-10-18 21:00:24 +00002791 block_limit = skb_frag_size(frag) + st->stepped_offset;
Thomas Graf677e90e2005-06-23 20:59:51 -07002792
2793 if (abs_offset < block_limit) {
2794 if (!st->frag_data)
Eric Dumazet51c56b02012-04-05 11:35:15 +02002795 st->frag_data = kmap_atomic(skb_frag_page(frag));
Thomas Graf677e90e2005-06-23 20:59:51 -07002796
2797 *data = (u8 *) st->frag_data + frag->page_offset +
2798 (abs_offset - st->stepped_offset);
2799
2800 return block_limit - abs_offset;
2801 }
2802
2803 if (st->frag_data) {
Eric Dumazet51c56b02012-04-05 11:35:15 +02002804 kunmap_atomic(st->frag_data);
Thomas Graf677e90e2005-06-23 20:59:51 -07002805 st->frag_data = NULL;
2806 }
2807
2808 st->frag_idx++;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002809 st->stepped_offset += skb_frag_size(frag);
Thomas Graf677e90e2005-06-23 20:59:51 -07002810 }
2811
Olaf Kirch5b5a60d2007-06-23 23:11:52 -07002812 if (st->frag_data) {
Eric Dumazet51c56b02012-04-05 11:35:15 +02002813 kunmap_atomic(st->frag_data);
Olaf Kirch5b5a60d2007-06-23 23:11:52 -07002814 st->frag_data = NULL;
2815 }
2816
David S. Miller21dc3302010-08-23 00:13:46 -07002817 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
Shyam Iyer71b33462009-01-29 16:12:42 -08002818 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
Thomas Graf677e90e2005-06-23 20:59:51 -07002819 st->frag_idx = 0;
2820 goto next_skb;
Shyam Iyer71b33462009-01-29 16:12:42 -08002821 } else if (st->cur_skb->next) {
2822 st->cur_skb = st->cur_skb->next;
Herbert Xu95e3b242009-01-29 16:07:52 -08002823 st->frag_idx = 0;
Thomas Graf677e90e2005-06-23 20:59:51 -07002824 goto next_skb;
2825 }
2826
2827 return 0;
2828}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002829EXPORT_SYMBOL(skb_seq_read);
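
/*
 * Example (editor's sketch): walking every byte of an skb with the
 * zerocopy sequential reader; example_walk() is a hypothetical name.
 */
static void example_walk(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* process len bytes at data here */
		consumed += len;
	}
	/* skb_seq_read() returned 0, so skb_abort_seq_read() is not needed;
	 * had we broken out early, skb_abort_seq_read(&st) would be mandatory.
	 */
}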
Thomas Graf677e90e2005-06-23 20:59:51 -07002830
2831/**
2832 * skb_abort_seq_read - Abort a sequential read of skb data
2833 * @st: state variable
2834 *
2835 * Must be called if the sequential read was abandoned before
2836 * skb_seq_read() returned 0.
2837 */
2838void skb_abort_seq_read(struct skb_seq_state *st)
2839{
2840 if (st->frag_data)
Eric Dumazet51c56b02012-04-05 11:35:15 +02002841 kunmap_atomic(st->frag_data);
Thomas Graf677e90e2005-06-23 20:59:51 -07002842}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002843EXPORT_SYMBOL(skb_abort_seq_read);
Thomas Graf677e90e2005-06-23 20:59:51 -07002844
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07002845#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
2846
2847static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2848 struct ts_config *conf,
2849 struct ts_state *state)
2850{
2851 return skb_seq_read(offset, text, TS_SKB_CB(state));
2852}
2853
2854static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2855{
2856 skb_abort_seq_read(TS_SKB_CB(state));
2857}
2858
2859/**
2860 * skb_find_text - Find a text pattern in skb data
2861 * @skb: the buffer to look in
2862 * @from: search offset
2863 * @to: search limit
2864 * @config: textsearch configuration
2865 * @state: uninitialized textsearch state variable
2866 *
2867 * Finds a pattern in the skb data according to the specified
2868 * textsearch configuration. Use textsearch_next() to retrieve
2869 * subsequent occurrences of the pattern. Returns the offset
2870 * to the first occurrence or UINT_MAX if no match was found.
2871 */
2872unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2873 unsigned int to, struct ts_config *config,
2874 struct ts_state *state)
2875{
Phil Oesterf72b9482006-06-26 00:00:57 -07002876 unsigned int ret;
2877
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07002878 config->get_next_block = skb_ts_get_next_block;
2879 config->finish = skb_ts_finish;
2880
2881 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2882
Phil Oesterf72b9482006-06-26 00:00:57 -07002883 ret = textsearch_find(config, state);
2884 return (ret <= to - from ? ret : UINT_MAX);
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07002885}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002886EXPORT_SYMBOL(skb_find_text);
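
/*
 * Example (editor's sketch): finding a literal pattern in an skb. The
 * "kmp" algorithm name and the pattern are assumptions for the sketch,
 * and <linux/textsearch.h> is assumed to be available.
 */
static unsigned int example_find(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;
	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;	/* offset of the first match, or UINT_MAX */
}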
Thomas Graf3fc7e8a2005-06-23 21:00:17 -07002887
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002888/**
Ben Hutchings2c530402012-07-10 10:55:09 +00002889 * skb_append_datato_frags - append the user data to a skb
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002890 * @sk: sock structure
Masanari Iidae793c0f2014-09-04 23:44:36 +09002891 * @skb: skb structure to which the user data is appended.
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002892 * @getfrag: call back function to be used for getting the user data
2893 * @from: pointer to user message iov
2894 * @length: length of the iov message
2895 *
2896 * Description: This procedure appends the user data to the fragment part
2897 * of the skb. If any page allocation fails, it returns -ENOMEM.
2898 */
2899int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
Martin Waitzdab96302005-12-05 13:40:12 -08002900 int (*getfrag)(void *from, char *to, int offset,
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002901 int len, int odd, struct sk_buff *skb),
2902 void *from, int length)
2903{
Eric Dumazetb2111722012-12-28 06:06:37 +00002904 int frg_cnt = skb_shinfo(skb)->nr_frags;
2905 int copy;
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002906 int offset = 0;
2907 int ret;
Eric Dumazetb2111722012-12-28 06:06:37 +00002908 struct page_frag *pfrag = &current->task_frag;
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002909
2910 do {
2911 /* Return error if we don't have space for new frag */
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002912 if (frg_cnt >= MAX_SKB_FRAGS)
Eric Dumazetb2111722012-12-28 06:06:37 +00002913 return -EMSGSIZE;
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002914
Eric Dumazetb2111722012-12-28 06:06:37 +00002915 if (!sk_page_frag_refill(sk, pfrag))
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002916 return -ENOMEM;
2917
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002918 /* copy the user data to page */
Eric Dumazetb2111722012-12-28 06:06:37 +00002919 copy = min_t(int, length, pfrag->size - pfrag->offset);
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002920
Eric Dumazetb2111722012-12-28 06:06:37 +00002921 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
2922 offset, copy, 0, skb);
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002923 if (ret < 0)
2924 return -EFAULT;
2925
2926 /* copy was successful so update the size parameters */
Eric Dumazetb2111722012-12-28 06:06:37 +00002927 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
2928 copy);
2929 frg_cnt++;
2930 pfrag->offset += copy;
2931 get_page(pfrag->page);
2932
2933 skb->truesize += copy;
2934 atomic_add(copy, &sk->sk_wmem_alloc);
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002935 skb->len += copy;
2936 skb->data_len += copy;
2937 offset += copy;
2938 length -= copy;
2939
2940 } while (length > 0);
2941
2942 return 0;
2943}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08002944EXPORT_SYMBOL(skb_append_datato_frags);
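
/*
 * Example (editor's sketch): a minimal getfrag callback that copies from a
 * plain kernel buffer; example_getfrag() and example_append() are
 * hypothetical names introduced for this sketch.
 */
static int example_getfrag(void *from, char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	memcpy(to, (char *)from + offset, len);
	return 0;
}

static int example_append(struct sock *sk, struct sk_buff *skb,
			  void *buf, int len)
{
	return skb_append_datato_frags(sk, skb, example_getfrag, buf, len);
}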
Ananda Rajue89e9cf2005-10-18 15:46:41 -07002945
Herbert Xucbb042f2006-03-20 22:43:56 -08002946/**
2947 * skb_pull_rcsum - pull skb and update receive checksum
2948 * @skb: buffer to update
Herbert Xucbb042f2006-03-20 22:43:56 -08002949 * @len: length of data pulled
2950 *
2951 * This function performs an skb_pull on the packet and updates
Urs Thuermannfee54fa2008-02-12 22:03:25 -08002952 * the CHECKSUM_COMPLETE checksum. It should be used in
Patrick McHardy84fa7932006-08-29 16:44:56 -07002953 * receive path processing instead of skb_pull unless you know
2954 * that the checksum difference is zero (e.g., a valid IP header)
2955 * or you are setting ip_summed to CHECKSUM_NONE.
Herbert Xucbb042f2006-03-20 22:43:56 -08002956 */
2957unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2958{
2959 BUG_ON(len > skb->len);
2960 skb->len -= len;
2961 BUG_ON(skb->len < skb->data_len);
2962 skb_postpull_rcsum(skb, skb->data, len);
2963 return skb->data += len;
2964}
Arnaldo Carvalho de Melof94691a2006-03-20 22:47:55 -08002965EXPORT_SYMBOL_GPL(skb_pull_rcsum);
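
/*
 * Example (editor's sketch): removing a 4-byte encapsulation tag on the
 * receive path while keeping a CHECKSUM_COMPLETE value correct; the tag
 * length is an assumption for the sketch.
 */
static int example_strip_tag(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 4))
		return -EINVAL;
	skb_pull_rcsum(skb, 4);	/* adjusts skb->csum while pulling */
	return 0;
}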
2966
Herbert Xuf4c50d92006-06-22 03:02:40 -07002967/**
2968 * skb_segment - Perform protocol segmentation on skb.
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02002969 * @head_skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002970 * @features: features for the output path (see dev->features)
Herbert Xuf4c50d92006-06-22 03:02:40 -07002971 *
2972 * This function performs segmentation on the given skb. It returns
Ben Hutchings4c821d72008-04-13 21:52:48 -07002973 * a pointer to the first in a list of new skbs for the segments.
2974 * In case of error it returns ERR_PTR(err).
Herbert Xuf4c50d92006-06-22 03:02:40 -07002975 */
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02002976struct sk_buff *skb_segment(struct sk_buff *head_skb,
2977 netdev_features_t features)
Herbert Xuf4c50d92006-06-22 03:02:40 -07002978{
2979 struct sk_buff *segs = NULL;
2980 struct sk_buff *tail = NULL;
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02002981 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02002982 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
2983 unsigned int mss = skb_shinfo(head_skb)->gso_size;
2984 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
Michael S. Tsirkin1fd819e2014-03-10 19:28:08 +02002985 struct sk_buff *frag_skb = head_skb;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002986 unsigned int offset = doffset;
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02002987 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002988 unsigned int headroom;
2989 unsigned int len;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002990 __be16 proto;
2991 bool csum;
Michał Mirosław04ed3e72011-01-24 15:32:47 -08002992 int sg = !!(features & NETIF_F_SG);
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02002993 int nfrags = skb_shinfo(head_skb)->nr_frags;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002994 int err = -ENOMEM;
2995 int i = 0;
2996 int pos;
Vlad Yasevich53d64712014-03-27 17:26:18 -04002997 int dummy;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002998
Wei-Chun Chao5882a072014-06-08 23:48:54 -07002999 __skb_push(head_skb, doffset);
Vlad Yasevich53d64712014-03-27 17:26:18 -04003000 proto = skb_network_protocol(head_skb, &dummy);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00003001 if (unlikely(!proto))
3002 return ERR_PTR(-EINVAL);
3003
Tom Herbert7e2b10c2014-06-04 17:20:02 -07003004 csum = !head_skb->encap_hdr_csum &&
3005 !!can_checksum_protocol(features, proto);
3006
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003007 headroom = skb_headroom(head_skb);
3008 pos = skb_headlen(head_skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003009
3010 do {
3011 struct sk_buff *nskb;
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02003012 skb_frag_t *nskb_frag;
Herbert Xuc8884ed2006-10-29 15:59:41 -08003013 int hsize;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003014 int size;
3015
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003016 len = head_skb->len - offset;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003017 if (len > mss)
3018 len = mss;
3019
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003020 hsize = skb_headlen(head_skb) - offset;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003021 if (hsize < 0)
3022 hsize = 0;
Herbert Xuc8884ed2006-10-29 15:59:41 -08003023 if (hsize > len || !sg)
3024 hsize = len;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003025
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003026 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3027 (skb_headlen(list_skb) == len || sg)) {
3028 BUG_ON(skb_headlen(list_skb) > len);
Herbert Xu89319d382008-12-15 23:26:06 -08003029
Herbert Xu9d8506c2013-11-21 11:10:04 -08003030 i = 0;
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003031 nfrags = skb_shinfo(list_skb)->nr_frags;
3032 frag = skb_shinfo(list_skb)->frags;
Michael S. Tsirkin1fd819e2014-03-10 19:28:08 +02003033 frag_skb = list_skb;
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003034 pos += skb_headlen(list_skb);
Herbert Xu9d8506c2013-11-21 11:10:04 -08003035
3036 while (pos < offset + len) {
3037 BUG_ON(i >= nfrags);
3038
Michael S. Tsirkin4e1beba2014-03-10 18:29:14 +02003039 size = skb_frag_size(frag);
Herbert Xu9d8506c2013-11-21 11:10:04 -08003040 if (pos + size > offset + len)
3041 break;
3042
3043 i++;
3044 pos += size;
Michael S. Tsirkin4e1beba2014-03-10 18:29:14 +02003045 frag++;
Herbert Xu9d8506c2013-11-21 11:10:04 -08003046 }
3047
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003048 nskb = skb_clone(list_skb, GFP_ATOMIC);
3049 list_skb = list_skb->next;
Herbert Xu89319d382008-12-15 23:26:06 -08003050
3051 if (unlikely(!nskb))
3052 goto err;
3053
Herbert Xu9d8506c2013-11-21 11:10:04 -08003054 if (unlikely(pskb_trim(nskb, len))) {
3055 kfree_skb(nskb);
3056 goto err;
3057 }
3058
Alexander Duyckec47ea82012-05-04 14:26:56 +00003059 hsize = skb_end_offset(nskb);
Herbert Xu89319d382008-12-15 23:26:06 -08003060 if (skb_cow_head(nskb, doffset + headroom)) {
3061 kfree_skb(nskb);
3062 goto err;
3063 }
3064
Alexander Duyckec47ea82012-05-04 14:26:56 +00003065 nskb->truesize += skb_end_offset(nskb) - hsize;
Herbert Xu89319d382008-12-15 23:26:06 -08003066 skb_release_head_state(nskb);
3067 __skb_push(nskb, doffset);
3068 } else {
Mel Gormanc93bdd02012-07-31 16:44:19 -07003069 nskb = __alloc_skb(hsize + doffset + headroom,
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003070 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
Mel Gormanc93bdd02012-07-31 16:44:19 -07003071 NUMA_NO_NODE);
Herbert Xu89319d382008-12-15 23:26:06 -08003072
3073 if (unlikely(!nskb))
3074 goto err;
3075
3076 skb_reserve(nskb, headroom);
3077 __skb_put(nskb, doffset);
3078 }
Herbert Xuf4c50d92006-06-22 03:02:40 -07003079
3080 if (segs)
3081 tail->next = nskb;
3082 else
3083 segs = nskb;
3084 tail = nskb;
3085
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003086 __copy_skb_header(nskb, head_skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003087
Eric Dumazet030737b2013-10-19 11:42:54 -07003088 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
Vlad Yasevichfcdfe3a2014-07-31 10:33:06 -04003089 skb_reset_mac_len(nskb);
Pravin B Shelar68c33162013-02-14 14:02:41 +00003090
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003091 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
Pravin B Shelar68c33162013-02-14 14:02:41 +00003092 nskb->data - tnl_hlen,
3093 doffset + tnl_hlen);
Herbert Xu89319d382008-12-15 23:26:06 -08003094
Herbert Xu9d8506c2013-11-21 11:10:04 -08003095 if (nskb->len == len + doffset)
Simon Horman1cdbcb72013-05-19 15:46:49 +00003096 goto perform_csum_check;
Herbert Xu89319d382008-12-15 23:26:06 -08003097
Tom Herberte585f232014-11-04 09:06:54 -08003098 if (!sg && !nskb->remcsum_offload) {
Herbert Xu6f85a122008-08-15 14:55:02 -07003099 nskb->ip_summed = CHECKSUM_NONE;
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003100 nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
Herbert Xuf4c50d92006-06-22 03:02:40 -07003101 skb_put(nskb, len),
3102 len, 0);
Tom Herbert7e2b10c2014-06-04 17:20:02 -07003103 SKB_GSO_CB(nskb)->csum_start =
Tom Herbertde843722014-06-25 12:51:01 -07003104 skb_headroom(nskb) + doffset;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003105 continue;
3106 }
3107
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02003108 nskb_frag = skb_shinfo(nskb)->frags;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003109
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003110 skb_copy_from_linear_data_offset(head_skb, offset,
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03003111 skb_put(nskb, hsize), hsize);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003112
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003113 skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
3114 SKBTX_SHARED_FRAG;
Eric Dumazetcef401d2013-01-25 20:34:37 +00003115
Herbert Xu9d8506c2013-11-21 11:10:04 -08003116 while (pos < offset + len) {
3117 if (i >= nfrags) {
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003118 BUG_ON(skb_headlen(list_skb));
Herbert Xu9d8506c2013-11-21 11:10:04 -08003119
3120 i = 0;
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003121 nfrags = skb_shinfo(list_skb)->nr_frags;
3122 frag = skb_shinfo(list_skb)->frags;
Michael S. Tsirkin1fd819e2014-03-10 19:28:08 +02003123 frag_skb = list_skb;
Herbert Xu9d8506c2013-11-21 11:10:04 -08003124
3125 BUG_ON(!nfrags);
3126
Michael S. Tsirkin1a4ceda2014-03-10 19:27:59 +02003127 list_skb = list_skb->next;
Herbert Xu9d8506c2013-11-21 11:10:04 -08003128 }
3129
3130 if (unlikely(skb_shinfo(nskb)->nr_frags >=
3131 MAX_SKB_FRAGS)) {
3132 net_warn_ratelimited(
3133 "skb_segment: too many frags: %u %u\n",
3134 pos, mss);
3135 goto err;
3136 }
3137
Michael S. Tsirkin1fd819e2014-03-10 19:28:08 +02003138 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
3139 goto err;
3140
Michael S. Tsirkin4e1beba2014-03-10 18:29:14 +02003141 *nskb_frag = *frag;
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02003142 __skb_frag_ref(nskb_frag);
3143 size = skb_frag_size(nskb_frag);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003144
3145 if (pos < offset) {
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02003146 nskb_frag->page_offset += offset - pos;
3147 skb_frag_size_sub(nskb_frag, offset - pos);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003148 }
3149
Herbert Xu89319d382008-12-15 23:26:06 -08003150 skb_shinfo(nskb)->nr_frags++;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003151
3152 if (pos + size <= offset + len) {
3153 i++;
Michael S. Tsirkin4e1beba2014-03-10 18:29:14 +02003154 frag++;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003155 pos += size;
3156 } else {
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02003157 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
Herbert Xu89319d382008-12-15 23:26:06 -08003158 goto skip_fraglist;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003159 }
3160
Michael S. Tsirkin8cb19902014-03-10 18:29:04 +02003161 nskb_frag++;
Herbert Xuf4c50d92006-06-22 03:02:40 -07003162 }
3163
Herbert Xu89319d382008-12-15 23:26:06 -08003164skip_fraglist:
Herbert Xuf4c50d92006-06-22 03:02:40 -07003165 nskb->data_len = len - hsize;
3166 nskb->len += nskb->data_len;
3167 nskb->truesize += nskb->data_len;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00003168
Simon Horman1cdbcb72013-05-19 15:46:49 +00003169perform_csum_check:
Tom Herberte585f232014-11-04 09:06:54 -08003170 if (!csum && !nskb->remcsum_offload) {
Pravin B Shelarec5f0612013-03-07 09:28:01 +00003171 nskb->csum = skb_checksum(nskb, doffset,
3172 nskb->len - doffset, 0);
3173 nskb->ip_summed = CHECKSUM_NONE;
Tom Herbert7e2b10c2014-06-04 17:20:02 -07003174 SKB_GSO_CB(nskb)->csum_start =
3175 skb_headroom(nskb) + doffset;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00003176 }
Michael S. Tsirkindf5771f2014-03-10 18:29:19 +02003177 } while ((offset += len) < head_skb->len);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003178
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07003179 /* Some callers want to get the end of the list.
3180 * Put it in segs->prev to avoid walking the list.
3181 * (see validate_xmit_skb_list() for example)
3182 */
3183 segs->prev = tail;
Toshiaki Makita432c8562014-10-27 10:30:51 -07003184
3185	/* The following permits correct backpressure for protocols
3186	 * using skb_set_owner_w().
3187	 * The idea is to transfer ownership from head_skb to the last segment.
3188 */
3189 if (head_skb->destructor == sock_wfree) {
3190 swap(tail->truesize, head_skb->truesize);
3191 swap(tail->destructor, head_skb->destructor);
3192 swap(tail->sk, head_skb->sk);
3193 }
Herbert Xuf4c50d92006-06-22 03:02:40 -07003194 return segs;
3195
3196err:
Eric Dumazet289dccb2013-12-20 14:29:08 -08003197 kfree_skb_list(segs);
Herbert Xuf4c50d92006-06-22 03:02:40 -07003198 return ERR_PTR(err);
3199}
Herbert Xuf4c50d92006-06-22 03:02:40 -07003200EXPORT_SYMBOL_GPL(skb_segment);
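
/*
 * Example (editor's sketch): segmenting a GSO buffer and walking the
 * resulting list, similar in spirit to the transmit-path validation code;
 * example_segment() is a hypothetical name.
 */
static int example_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs, *next;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	while (segs) {
		next = segs->next;
		segs->next = NULL;
		/* hand segs to the driver or queue it here */
		segs = next;
	}
	consume_skb(skb);	/* the original GSO skb is no longer needed */
	return 0;
}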
3201
Herbert Xu71d93b32008-12-15 23:42:33 -08003202int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3203{
Eric Dumazet8a291112013-10-08 09:02:23 -07003204 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
Herbert Xu67147ba2009-05-26 18:50:22 +00003205 unsigned int offset = skb_gro_offset(skb);
3206 unsigned int headlen = skb_headlen(skb);
Eric Dumazet8a291112013-10-08 09:02:23 -07003207 struct sk_buff *nskb, *lp, *p = *head;
3208 unsigned int len = skb_gro_len(skb);
Eric Dumazet715dc1f2012-05-02 23:33:21 +00003209 unsigned int delta_truesize;
Eric Dumazet8a291112013-10-08 09:02:23 -07003210 unsigned int headroom;
Herbert Xu71d93b32008-12-15 23:42:33 -08003211
Eric Dumazet8a291112013-10-08 09:02:23 -07003212 if (unlikely(p->len + len >= 65536))
Herbert Xu71d93b32008-12-15 23:42:33 -08003213 return -E2BIG;
3214
Eric Dumazet29e98242014-05-16 11:34:37 -07003215 lp = NAPI_GRO_CB(p)->last;
Eric Dumazet8a291112013-10-08 09:02:23 -07003216 pinfo = skb_shinfo(lp);
3217
3218 if (headlen <= offset) {
Herbert Xu42da6992009-05-26 18:50:19 +00003219 skb_frag_t *frag;
Herbert Xu66e92fc2009-05-26 18:50:32 +00003220 skb_frag_t *frag2;
Herbert Xu9aaa1562009-05-26 18:50:33 +00003221 int i = skbinfo->nr_frags;
3222 int nr_frags = pinfo->nr_frags + i;
Herbert Xu42da6992009-05-26 18:50:19 +00003223
Herbert Xu66e92fc2009-05-26 18:50:32 +00003224 if (nr_frags > MAX_SKB_FRAGS)
Eric Dumazet8a291112013-10-08 09:02:23 -07003225 goto merge;
Herbert Xu81705ad2009-01-29 14:19:51 +00003226
Eric Dumazet8a291112013-10-08 09:02:23 -07003227 offset -= headlen;
Herbert Xu9aaa1562009-05-26 18:50:33 +00003228 pinfo->nr_frags = nr_frags;
3229 skbinfo->nr_frags = 0;
Herbert Xuf5572062009-01-14 20:40:03 -08003230
Herbert Xu9aaa1562009-05-26 18:50:33 +00003231 frag = pinfo->frags + nr_frags;
3232 frag2 = skbinfo->frags + i;
Herbert Xu66e92fc2009-05-26 18:50:32 +00003233 do {
3234 *--frag = *--frag2;
3235 } while (--i);
3236
3237 frag->page_offset += offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003238 skb_frag_size_sub(frag, offset);
Herbert Xu66e92fc2009-05-26 18:50:32 +00003239
Eric Dumazet715dc1f2012-05-02 23:33:21 +00003240 /* all fragments truesize : remove (head size + sk_buff) */
Alexander Duyckec47ea82012-05-04 14:26:56 +00003241 delta_truesize = skb->truesize -
3242 SKB_TRUESIZE(skb_end_offset(skb));
Eric Dumazet715dc1f2012-05-02 23:33:21 +00003243
Herbert Xuf5572062009-01-14 20:40:03 -08003244 skb->truesize -= skb->data_len;
3245 skb->len -= skb->data_len;
3246 skb->data_len = 0;
3247
Eric Dumazet715dc1f2012-05-02 23:33:21 +00003248 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
Herbert Xu5d38a072009-01-04 16:13:40 -08003249 goto done;
Eric Dumazetd7e88832012-04-30 08:10:34 +00003250 } else if (skb->head_frag) {
3251 int nr_frags = pinfo->nr_frags;
3252 skb_frag_t *frag = pinfo->frags + nr_frags;
3253 struct page *page = virt_to_head_page(skb->head);
3254 unsigned int first_size = headlen - offset;
3255 unsigned int first_offset;
3256
3257 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
Eric Dumazet8a291112013-10-08 09:02:23 -07003258 goto merge;
Eric Dumazetd7e88832012-04-30 08:10:34 +00003259
3260 first_offset = skb->data -
3261 (unsigned char *)page_address(page) +
3262 offset;
3263
3264 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3265
3266 frag->page.p = page;
3267 frag->page_offset = first_offset;
3268 skb_frag_size_set(frag, first_size);
3269
3270 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3271		/* We don't need to clear skbinfo->nr_frags here */
3272
Eric Dumazet715dc1f2012-05-02 23:33:21 +00003273 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
Eric Dumazetd7e88832012-04-30 08:10:34 +00003274 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3275 goto done;
Eric Dumazet8a291112013-10-08 09:02:23 -07003276 }
Eric Dumazet73d3fe62014-09-29 10:34:29 -07003277 /* switch back to head shinfo */
3278 pinfo = skb_shinfo(p);
3279
Eric Dumazet8a291112013-10-08 09:02:23 -07003280 if (pinfo->frag_list)
3281 goto merge;
3282 if (skb_gro_len(p) != pinfo->gso_size)
Herbert Xu69c0cab2009-11-17 05:18:18 -08003283 return -E2BIG;
Herbert Xu71d93b32008-12-15 23:42:33 -08003284
3285 headroom = skb_headroom(p);
Eric Dumazet3d3be432010-09-01 00:50:51 +00003286 nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
Herbert Xu71d93b32008-12-15 23:42:33 -08003287 if (unlikely(!nskb))
3288 return -ENOMEM;
3289
3290 __copy_skb_header(nskb, p);
3291 nskb->mac_len = p->mac_len;
3292
3293 skb_reserve(nskb, headroom);
Herbert Xu86911732009-01-29 14:19:50 +00003294 __skb_put(nskb, skb_gro_offset(p));
Herbert Xu71d93b32008-12-15 23:42:33 -08003295
Herbert Xu86911732009-01-29 14:19:50 +00003296 skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
Herbert Xu71d93b32008-12-15 23:42:33 -08003297 skb_set_network_header(nskb, skb_network_offset(p));
3298 skb_set_transport_header(nskb, skb_transport_offset(p));
3299
Herbert Xu86911732009-01-29 14:19:50 +00003300 __skb_pull(p, skb_gro_offset(p));
3301 memcpy(skb_mac_header(nskb), skb_mac_header(p),
3302 p->data - skb_mac_header(p));
Herbert Xu71d93b32008-12-15 23:42:33 -08003303
Herbert Xu71d93b32008-12-15 23:42:33 -08003304 skb_shinfo(nskb)->frag_list = p;
Herbert Xu9aaa1562009-05-26 18:50:33 +00003305 skb_shinfo(nskb)->gso_size = pinfo->gso_size;
Herbert Xu622e0ca2010-05-20 23:07:56 -07003306 pinfo->gso_size = 0;
Eric Dumazetf4a775d2014-09-22 16:29:32 -07003307 __skb_header_release(p);
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003308 NAPI_GRO_CB(nskb)->last = p;
Herbert Xu71d93b32008-12-15 23:42:33 -08003309
3310 nskb->data_len += p->len;
Eric Dumazetde8261c2012-02-13 04:09:20 +00003311 nskb->truesize += p->truesize;
Herbert Xu71d93b32008-12-15 23:42:33 -08003312 nskb->len += p->len;
3313
3314 *head = nskb;
3315 nskb->next = p->next;
3316 p->next = NULL;
3317
3318 p = nskb;
3319
3320merge:
Eric Dumazet715dc1f2012-05-02 23:33:21 +00003321 delta_truesize = skb->truesize;
Herbert Xu67147ba2009-05-26 18:50:22 +00003322 if (offset > headlen) {
Michal Schmidtd1dc7ab2011-01-24 12:08:48 +00003323 unsigned int eat = offset - headlen;
3324
3325 skbinfo->frags[0].page_offset += eat;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003326 skb_frag_size_sub(&skbinfo->frags[0], eat);
Michal Schmidtd1dc7ab2011-01-24 12:08:48 +00003327 skb->data_len -= eat;
3328 skb->len -= eat;
Herbert Xu67147ba2009-05-26 18:50:22 +00003329 offset = headlen;
Herbert Xu56035022009-02-05 21:26:52 -08003330 }
3331
Herbert Xu67147ba2009-05-26 18:50:22 +00003332 __skb_pull(skb, offset);
Herbert Xu56035022009-02-05 21:26:52 -08003333
Eric Dumazet29e98242014-05-16 11:34:37 -07003334 if (NAPI_GRO_CB(p)->last == p)
Eric Dumazet8a291112013-10-08 09:02:23 -07003335 skb_shinfo(p)->frag_list = skb;
3336 else
3337 NAPI_GRO_CB(p)->last->next = skb;
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003338 NAPI_GRO_CB(p)->last = skb;
Eric Dumazetf4a775d2014-09-22 16:29:32 -07003339 __skb_header_release(skb);
Eric Dumazet8a291112013-10-08 09:02:23 -07003340 lp = p;
Herbert Xu71d93b32008-12-15 23:42:33 -08003341
Herbert Xu5d38a072009-01-04 16:13:40 -08003342done:
3343 NAPI_GRO_CB(p)->count++;
Herbert Xu37fe4732009-01-17 19:48:13 +00003344 p->data_len += len;
Eric Dumazet715dc1f2012-05-02 23:33:21 +00003345 p->truesize += delta_truesize;
Herbert Xu37fe4732009-01-17 19:48:13 +00003346 p->len += len;
Eric Dumazet8a291112013-10-08 09:02:23 -07003347 if (lp != p) {
3348 lp->data_len += len;
3349 lp->truesize += delta_truesize;
3350 lp->len += len;
3351 }
Herbert Xu71d93b32008-12-15 23:42:33 -08003352 NAPI_GRO_CB(skb)->same_flow = 1;
3353 return 0;
3354}
Herbert Xu71d93b32008-12-15 23:42:33 -08003355
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356void __init skb_init(void)
3357{
3358 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
3359 sizeof(struct sk_buff),
3360 0,
Alexey Dobriyane5d679f332006-08-26 19:25:52 -07003361 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09003362 NULL);
David S. Millerd179cd12005-08-17 14:57:30 -07003363 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
Eric Dumazetd0bf4a92014-09-29 13:29:15 -07003364 sizeof(struct sk_buff_fclones),
David S. Millerd179cd12005-08-17 14:57:30 -07003365 0,
Alexey Dobriyane5d679f332006-08-26 19:25:52 -07003366 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09003367 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368}
3369
David Howells716ea3a2007-04-02 20:19:53 -07003370/**
3371 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
3372 * @skb: Socket buffer containing the buffers to be mapped
3373 * @sg: The scatter-gather list to map into
3374 * @offset: The offset into the buffer's contents to start mapping
3375 * @len: Length of buffer space to be mapped
3376 *
3377 * Fill the specified scatter-gather list with mappings/pointers into a
3378 * region of the buffer space attached to a socket buffer.
3379 */
David S. Miller51c739d2007-10-30 21:29:29 -07003380static int
3381__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
David Howells716ea3a2007-04-02 20:19:53 -07003382{
David S. Miller1a028e52007-04-27 15:21:23 -07003383 int start = skb_headlen(skb);
3384 int i, copy = start - offset;
David S. Millerfbb398a2009-06-09 00:18:59 -07003385 struct sk_buff *frag_iter;
David Howells716ea3a2007-04-02 20:19:53 -07003386 int elt = 0;
3387
3388 if (copy > 0) {
3389 if (copy > len)
3390 copy = len;
Jens Axboe642f1492007-10-24 11:20:47 +02003391 sg_set_buf(sg, skb->data + offset, copy);
David Howells716ea3a2007-04-02 20:19:53 -07003392 elt++;
3393 if ((len -= copy) == 0)
3394 return elt;
3395 offset += copy;
3396 }
3397
3398 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
David S. Miller1a028e52007-04-27 15:21:23 -07003399 int end;
David Howells716ea3a2007-04-02 20:19:53 -07003400
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003401 WARN_ON(start > offset + len);
David S. Miller1a028e52007-04-27 15:21:23 -07003402
Eric Dumazet9e903e02011-10-18 21:00:24 +00003403 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
David Howells716ea3a2007-04-02 20:19:53 -07003404 if ((copy = end - offset) > 0) {
3405 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3406
3407 if (copy > len)
3408 copy = len;
Ian Campbellea2ab692011-08-22 23:44:58 +00003409 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
Jens Axboe642f1492007-10-24 11:20:47 +02003410 frag->page_offset+offset-start);
David Howells716ea3a2007-04-02 20:19:53 -07003411 elt++;
3412 if (!(len -= copy))
3413 return elt;
3414 offset += copy;
3415 }
David S. Miller1a028e52007-04-27 15:21:23 -07003416 start = end;
David Howells716ea3a2007-04-02 20:19:53 -07003417 }
3418
David S. Millerfbb398a2009-06-09 00:18:59 -07003419 skb_walk_frags(skb, frag_iter) {
3420 int end;
David Howells716ea3a2007-04-02 20:19:53 -07003421
David S. Millerfbb398a2009-06-09 00:18:59 -07003422 WARN_ON(start > offset + len);
David Howells716ea3a2007-04-02 20:19:53 -07003423
David S. Millerfbb398a2009-06-09 00:18:59 -07003424 end = start + frag_iter->len;
3425 if ((copy = end - offset) > 0) {
3426 if (copy > len)
3427 copy = len;
3428 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3429 copy);
3430 if ((len -= copy) == 0)
3431 return elt;
3432 offset += copy;
David Howells716ea3a2007-04-02 20:19:53 -07003433 }
David S. Millerfbb398a2009-06-09 00:18:59 -07003434 start = end;
David Howells716ea3a2007-04-02 20:19:53 -07003435 }
3436 BUG_ON(len);
3437 return elt;
3438}
3439
Fan Du25a91d82014-01-18 09:54:23 +08003440/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
3441 * given sglist without marking the sg which contains the last skb data as the
3442 * end. So the caller can manipulate the sg list at will when padding new data
3443 * after the first call, without calling sg_unmark_end to extend the sg list.
3444 *
3445 * Scenario to use skb_to_sgvec_nomark:
3446 * 1. sg_init_table
3447 * 2. skb_to_sgvec_nomark(payload1)
3448 * 3. skb_to_sgvec_nomark(payload2)
3449 *
3450 * This is equivalent to:
3451 * 1. sg_init_table
3452 * 2. skb_to_sgvec(payload1)
3453 * 3. sg_unmark_end
3454 * 4. skb_to_sgvec(payload2)
3455 *
3456 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
3457 * is preferable.
3458 */
3459int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
3460 int offset, int len)
3461{
3462 return __skb_to_sgvec(skb, sg, offset, len);
3463}
3464EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
3465
David S. Miller51c739d2007-10-30 21:29:29 -07003466int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3467{
3468 int nsg = __skb_to_sgvec(skb, sg, offset, len);
3469
Jens Axboec46f2332007-10-31 12:06:37 +01003470 sg_mark_end(&sg[nsg - 1]);
David S. Miller51c739d2007-10-30 21:29:29 -07003471
3472 return nsg;
3473}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003474EXPORT_SYMBOL_GPL(skb_to_sgvec);
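
/*
 * Example (editor's sketch): mapping an skb into a scatterlist before
 * handing it to the crypto layer, as IPsec's ESP code does. The caller is
 * assumed to have sized @sg appropriately, e.g. from skb_cow_data().
 */
static int example_map(struct sk_buff *skb, struct scatterlist *sg, int nents)
{
	sg_init_table(sg, nents);
	return skb_to_sgvec(skb, sg, 0, skb->len);	/* entries filled */
}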
David S. Miller51c739d2007-10-30 21:29:29 -07003475
David Howells716ea3a2007-04-02 20:19:53 -07003476/**
3477 * skb_cow_data - Check that a socket buffer's data buffers are writable
3478 * @skb: The socket buffer to check.
3479 * @tailbits: Amount of trailing space to be added
3480 * @trailer: Returned pointer to the skb where the @tailbits space begins
3481 *
3482 * Make sure that the data buffers attached to a socket buffer are
3483 * writable. If they are not, private copies are made of the data buffers
3484 * and the socket buffer is set to use these instead.
3485 *
3486 * If @tailbits is given, make sure that there is space to write @tailbits
3487 * bytes of data beyond current end of socket buffer. @trailer will be
3488 * set to point to the skb in which this space begins.
3489 *
3490 * The number of scatterlist elements required to completely map the
3491 * COW'd and extended socket buffer will be returned.
3492 */
3493int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3494{
3495 int copyflag;
3496 int elt;
3497 struct sk_buff *skb1, **skb_p;
3498
3499 /* If skb is cloned or its head is paged, reallocate
3500 * head pulling out all the pages (pages are considered not writable
3501 * at the moment even if they are anonymous).
3502 */
3503 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3504 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3505 return -ENOMEM;
3506
3507 /* Easy case. Most of packets will go this way. */
David S. Miller21dc3302010-08-23 00:13:46 -07003508 if (!skb_has_frag_list(skb)) {
David Howells716ea3a2007-04-02 20:19:53 -07003509		/* A little trouble: not enough space for the trailer.
3510		 * This should not happen when the stack is tuned to generate
3511		 * good frames. OK, on a miss we reallocate and reserve even more
3512		 * space; 128 bytes is fair. */
3513
3514 if (skb_tailroom(skb) < tailbits &&
3515 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3516 return -ENOMEM;
3517
3518 /* Voila! */
3519 *trailer = skb;
3520 return 1;
3521 }
3522
3523	/* Misery. We are in trouble, going to mince fragments... */
3524
3525 elt = 1;
3526 skb_p = &skb_shinfo(skb)->frag_list;
3527 copyflag = 0;
3528
3529 while ((skb1 = *skb_p) != NULL) {
3530 int ntail = 0;
3531
3532 /* The fragment is partially pulled by someone,
3533 * this can happen on input. Copy it and everything
3534 * after it. */
3535
3536 if (skb_shared(skb1))
3537 copyflag = 1;
3538
3539 /* If the skb is the last, worry about trailer. */
3540
3541 if (skb1->next == NULL && tailbits) {
3542 if (skb_shinfo(skb1)->nr_frags ||
David S. Miller21dc3302010-08-23 00:13:46 -07003543 skb_has_frag_list(skb1) ||
David Howells716ea3a2007-04-02 20:19:53 -07003544 skb_tailroom(skb1) < tailbits)
3545 ntail = tailbits + 128;
3546 }
3547
3548 if (copyflag ||
3549 skb_cloned(skb1) ||
3550 ntail ||
3551 skb_shinfo(skb1)->nr_frags ||
David S. Miller21dc3302010-08-23 00:13:46 -07003552 skb_has_frag_list(skb1)) {
David Howells716ea3a2007-04-02 20:19:53 -07003553 struct sk_buff *skb2;
3554
3555 /* Fuck, we are miserable poor guys... */
3556 if (ntail == 0)
3557 skb2 = skb_copy(skb1, GFP_ATOMIC);
3558 else
3559 skb2 = skb_copy_expand(skb1,
3560 skb_headroom(skb1),
3561 ntail,
3562 GFP_ATOMIC);
3563 if (unlikely(skb2 == NULL))
3564 return -ENOMEM;
3565
3566 if (skb1->sk)
3567 skb_set_owner_w(skb2, skb1->sk);
3568
3569 /* Looking around. Are we still alive?
3570 * OK, link new skb, drop old one */
3571
3572 skb2->next = skb1->next;
3573 *skb_p = skb2;
3574 kfree_skb(skb1);
3575 skb1 = skb2;
3576 }
3577 elt++;
3578 *trailer = skb1;
3579 skb_p = &skb1->next;
3580 }
3581
3582 return elt;
3583}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003584EXPORT_SYMBOL_GPL(skb_cow_data);
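
/*
 * Example (editor's sketch): making an skb writable and growing it by a
 * trailer before an in-place transformation, in the spirit of esp_output();
 * example_cow_for_trailer() is a hypothetical name.
 */
static int example_cow_for_trailer(struct sk_buff *skb, int tlen)
{
	struct sk_buff *trailer;
	int nsg = skb_cow_data(skb, tlen, &trailer);

	if (nsg < 0)
		return nsg;
	pskb_put(skb, trailer, tlen);	/* extend the tail inside trailer */
	return nsg;	/* scatterlist entries needed to map the chain */
}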
David Howells716ea3a2007-04-02 20:19:53 -07003585
Eric Dumazetb1faf562010-05-31 23:44:05 -07003586static void sock_rmem_free(struct sk_buff *skb)
3587{
3588 struct sock *sk = skb->sk;
3589
3590 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3591}
3592
3593/*
3594 * Note: We don't mem-charge error packets (no sk_forward_alloc changes)
3595 */
3596int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3597{
3598 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
Eric Dumazet95c96172012-04-15 05:58:06 +00003599 (unsigned int)sk->sk_rcvbuf)
Eric Dumazetb1faf562010-05-31 23:44:05 -07003600 return -ENOMEM;
3601
3602 skb_orphan(skb);
3603 skb->sk = sk;
3604 skb->destructor = sock_rmem_free;
3605 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3606
Eric Dumazetabb57ea2011-05-18 02:21:31 -04003607 /* before exiting rcu section, make sure dst is refcounted */
3608 skb_dst_force(skb);
3609
Eric Dumazetb1faf562010-05-31 23:44:05 -07003610 skb_queue_tail(&sk->sk_error_queue, skb);
3611 if (!sock_flag(sk, SOCK_DEAD))
David S. Miller676d2362014-04-11 16:15:36 -04003612 sk->sk_data_ready(sk);
Eric Dumazetb1faf562010-05-31 23:44:05 -07003613 return 0;
3614}
3615EXPORT_SYMBOL(sock_queue_err_skb);
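
/*
 * Example (editor's sketch): filling in a sock_exterr_skb and queueing it,
 * as the timestamping completion code further below does.
 */
static void example_queue_error(struct sock *sk, struct sk_buff *skb)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);	/* receive budget exceeded, drop */
}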
3616
Willem de Bruijn364a9e92014-08-31 21:30:27 -04003617struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
3618{
3619 struct sk_buff_head *q = &sk->sk_error_queue;
3620 struct sk_buff *skb, *skb_next;
3621 int err = 0;
3622
3623 spin_lock_bh(&q->lock);
3624 skb = __skb_dequeue(q);
3625 if (skb && (skb_next = skb_peek(q)))
3626 err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
3627 spin_unlock_bh(&q->lock);
3628
3629 sk->sk_err = err;
3630 if (err)
3631 sk->sk_error_report(sk);
3632
3633 return skb;
3634}
3635EXPORT_SYMBOL(sock_dequeue_err_skb);
3636
Alexander Duyckcab41c42014-09-10 18:05:26 -04003637/**
3638 * skb_clone_sk - create clone of skb, and take reference to socket
3639 * @skb: the skb to clone
3640 *
3641 * This function creates a clone of a buffer that holds a reference on
3642 * sk_refcnt. Buffers created via this function are meant to be
3643 * returned using sock_queue_err_skb, or freed via kfree_skb.
3644 *
3645 * When passing buffers allocated with this function to sock_queue_err_skb
3646 * it is necessary to wrap the call with sock_hold/sock_put in order to
3647 * prevent the socket from being released prior to being enqueued on
3648 * the sk_error_queue.
3649 */
Alexander Duyck62bccb82014-09-04 13:31:35 -04003650struct sk_buff *skb_clone_sk(struct sk_buff *skb)
3651{
3652 struct sock *sk = skb->sk;
3653 struct sk_buff *clone;
3654
3655 if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
3656 return NULL;
3657
3658 clone = skb_clone(skb, GFP_ATOMIC);
3659 if (!clone) {
3660 sock_put(sk);
3661 return NULL;
3662 }
3663
3664 clone->sk = sk;
3665 clone->destructor = sock_efree;
3666
3667 return clone;
3668}
3669EXPORT_SYMBOL(skb_clone_sk);
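
/*
 * Example (editor's sketch): the sock_hold/sock_put wrap described in the
 * comment above, returning a clone on the socket's error queue.
 */
static void example_return_clone(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone_sk(skb);
	struct sock *sk;

	if (!clone)
		return;
	sk = clone->sk;
	sock_hold(sk);	/* keep sk alive across skb_orphan() in the enqueue */
	if (sock_queue_err_skb(sk, clone))
		kfree_skb(clone);
	sock_put(sk);
}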
3670
Alexander Duyck37846ef2014-09-04 13:31:10 -04003671static void __skb_complete_tx_timestamp(struct sk_buff *skb,
3672 struct sock *sk,
3673 int tstype)
Patrick Ohlyac45f602009-02-12 05:03:37 +00003674{
Patrick Ohlyac45f602009-02-12 05:03:37 +00003675 struct sock_exterr_skb *serr;
Patrick Ohlyac45f602009-02-12 05:03:37 +00003676 int err;
3677
Patrick Ohlyac45f602009-02-12 05:03:37 +00003678 serr = SKB_EXT_ERR(skb);
3679 memset(serr, 0, sizeof(*serr));
3680 serr->ee.ee_errno = ENOMSG;
3681 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
Willem de Bruijne7fd2882014-08-04 22:11:48 -04003682 serr->ee.ee_info = tstype;
Willem de Bruijn4ed2d762014-08-04 22:11:49 -04003683 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
Willem de Bruijn09c2d252014-08-04 22:11:47 -04003684 serr->ee.ee_data = skb_shinfo(skb)->tskey;
Willem de Bruijn4ed2d762014-08-04 22:11:49 -04003685 if (sk->sk_protocol == IPPROTO_TCP)
3686 serr->ee.ee_data -= sk->sk_tskey;
3687 }
Eric Dumazet29030372010-05-29 00:20:48 -07003688
Patrick Ohlyac45f602009-02-12 05:03:37 +00003689 err = sock_queue_err_skb(sk, skb);
Eric Dumazet29030372010-05-29 00:20:48 -07003690
Patrick Ohlyac45f602009-02-12 05:03:37 +00003691 if (err)
3692 kfree_skb(skb);
3693}
Alexander Duyck37846ef2014-09-04 13:31:10 -04003694
Willem de Bruijnb245be12015-01-30 13:29:32 -05003695static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
3696{
3697 bool ret;
3698
3699 if (likely(sysctl_tstamp_allow_data || tsonly))
3700 return true;
3701
3702 read_lock_bh(&sk->sk_callback_lock);
3703 ret = sk->sk_socket && sk->sk_socket->file &&
3704 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
3705 read_unlock_bh(&sk->sk_callback_lock);
3706 return ret;
3707}
3708
Alexander Duyck37846ef2014-09-04 13:31:10 -04003709void skb_complete_tx_timestamp(struct sk_buff *skb,
3710 struct skb_shared_hwtstamps *hwtstamps)
3711{
3712 struct sock *sk = skb->sk;
3713
Willem de Bruijnb245be12015-01-30 13:29:32 -05003714 if (!skb_may_tx_timestamp(sk, false))
3715 return;
3716
Alexander Duyck62bccb82014-09-04 13:31:35 -04003717 /* take a reference to prevent skb_orphan() from freeing the socket */
3718 sock_hold(sk);
Alexander Duyck37846ef2014-09-04 13:31:10 -04003719
Alexander Duyck62bccb82014-09-04 13:31:35 -04003720 *skb_hwtstamps(skb) = *hwtstamps;
3721 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
Alexander Duyck37846ef2014-09-04 13:31:10 -04003722
3723 sock_put(sk);
3724}
3725EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
3726
3727void __skb_tstamp_tx(struct sk_buff *orig_skb,
3728 struct skb_shared_hwtstamps *hwtstamps,
3729 struct sock *sk, int tstype)
3730{
3731 struct sk_buff *skb;
Willem de Bruijn49ca0d82015-01-30 13:29:31 -05003732 bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
Alexander Duyck37846ef2014-09-04 13:31:10 -04003733
Willem de Bruijnb245be12015-01-30 13:29:32 -05003734 if (!sk || !skb_may_tx_timestamp(sk, tsonly))
Alexander Duyck37846ef2014-09-04 13:31:10 -04003735 return;
3736
Willem de Bruijn49ca0d82015-01-30 13:29:31 -05003737 if (tsonly)
3738 skb = alloc_skb(0, GFP_ATOMIC);
Alexander Duyck37846ef2014-09-04 13:31:10 -04003739 else
Willem de Bruijn49ca0d82015-01-30 13:29:31 -05003740 skb = skb_clone(orig_skb, GFP_ATOMIC);
Alexander Duyck37846ef2014-09-04 13:31:10 -04003741 if (!skb)
3742 return;
3743
Willem de Bruijn49ca0d82015-01-30 13:29:31 -05003744 if (tsonly) {
3745 skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
3746 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
3747 }
3748
3749 if (hwtstamps)
3750 *skb_hwtstamps(skb) = *hwtstamps;
3751 else
3752 skb->tstamp = ktime_get_real();
3753
Alexander Duyck37846ef2014-09-04 13:31:10 -04003754 __skb_complete_tx_timestamp(skb, sk, tstype);
3755}
Willem de Bruijne7fd2882014-08-04 22:11:48 -04003756EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
3757
3758void skb_tstamp_tx(struct sk_buff *orig_skb,
3759 struct skb_shared_hwtstamps *hwtstamps)
3760{
3761 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
3762 SCM_TSTAMP_SND);
3763}
Patrick Ohlyac45f602009-02-12 05:03:37 +00003764EXPORT_SYMBOL_GPL(skb_tstamp_tx);
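
/*
 * Example (editor's sketch): a driver reporting a hardware transmit
 * timestamp from a completion event; the nanosecond value is assumed to
 * come from the NIC, and example_report_hwtstamp() is a hypothetical name.
 */
static void example_report_hwtstamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps hwts;

	memset(&hwts, 0, sizeof(hwts));
	hwts.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &hwts);	/* clones skb onto sk's error queue */
}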
3765
Johannes Berg6e3e9392011-11-09 10:15:42 +01003766void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3767{
3768 struct sock *sk = skb->sk;
3769 struct sock_exterr_skb *serr;
3770 int err;
3771
3772 skb->wifi_acked_valid = 1;
3773 skb->wifi_acked = acked;
3774
3775 serr = SKB_EXT_ERR(skb);
3776 memset(serr, 0, sizeof(*serr));
3777 serr->ee.ee_errno = ENOMSG;
3778 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3779
Alexander Duyckbf7fa552014-09-10 18:05:42 -04003780 /* take a reference to prevent skb_orphan() from freeing the socket */
3781 sock_hold(sk);
3782
Johannes Berg6e3e9392011-11-09 10:15:42 +01003783 err = sock_queue_err_skb(sk, skb);
3784 if (err)
3785 kfree_skb(skb);
Alexander Duyckbf7fa552014-09-10 18:05:42 -04003786
3787 sock_put(sk);
Johannes Berg6e3e9392011-11-09 10:15:42 +01003788}
3789EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3790
Patrick Ohlyac45f602009-02-12 05:03:37 +00003791
Rusty Russellf35d9d82008-02-04 23:49:54 -05003792/**
3793 * skb_partial_csum_set - set up and verify partial csum values for packet
3794 * @skb: the skb to set
3795 * @start: the number of bytes after skb->data to start checksumming.
3796 * @off: the offset from start to place the checksum.
3797 *
3798 * For untrusted partially-checksummed packets, we need to make sure the values
3799 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3800 *
3801 * This function checks and sets those values and skb->ip_summed: if this
3802 * returns false you should drop the packet.
3803 */
3804bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3805{
Herbert Xu5ff8dda2009-06-04 01:22:01 +00003806 if (unlikely(start > skb_headlen(skb)) ||
3807 unlikely((int)start + off > skb_headlen(skb) - 2)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003808 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
3809 start, off, skb_headlen(skb));
Rusty Russellf35d9d82008-02-04 23:49:54 -05003810 return false;
3811 }
3812 skb->ip_summed = CHECKSUM_PARTIAL;
3813 skb->csum_start = skb_headroom(skb) + start;
3814 skb->csum_offset = off;
Jason Wange5d5dec2013-03-26 23:11:20 +00003815 skb_set_transport_header(skb, start);
Rusty Russellf35d9d82008-02-04 23:49:54 -05003816 return true;
3817}
David S. Millerb4ac530fc2009-02-10 02:09:24 -08003818EXPORT_SYMBOL_GPL(skb_partial_csum_set);
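
/*
 * Example (editor's sketch): validating checksum-offload metadata supplied
 * by an untrusted source (e.g. a virtio-style header) before accepting
 * the skb; the offsets are assumed to come from that header.
 */
static int example_accept_partial_csum(struct sk_buff *skb,
				       u16 csum_start, u16 csum_offset)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
		return -EINVAL;	/* offsets point outside the linear header */
	return 0;
}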
Rusty Russellf35d9d82008-02-04 23:49:54 -05003819
Paul Durranted1f50c2014-01-09 10:02:46 +00003820static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
3821 unsigned int max)
3822{
3823 if (skb_headlen(skb) >= len)
3824 return 0;
3825
3826 /* If we need to pullup then pullup to the max, so we
3827 * won't need to do it again.
3828 */
3829 if (max > skb->len)
3830 max = skb->len;
3831
3832 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
3833 return -ENOMEM;
3834
3835 if (skb_headlen(skb) < len)
3836 return -EPROTO;
3837
3838 return 0;
3839}
3840
Jan Beulichf9708b42014-03-11 13:56:05 +00003841#define MAX_TCP_HDR_LEN (15 * 4)
3842
3843static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
3844 typeof(IPPROTO_IP) proto,
3845 unsigned int off)
3846{
3847 switch (proto) {
3848 int err;
3849
3850 case IPPROTO_TCP:
3851 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
3852 off + MAX_TCP_HDR_LEN);
3853 if (!err && !skb_partial_csum_set(skb, off,
3854 offsetof(struct tcphdr,
3855 check)))
3856 err = -EPROTO;
3857 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
3858
3859 case IPPROTO_UDP:
3860 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
3861 off + sizeof(struct udphdr));
3862 if (!err && !skb_partial_csum_set(skb, off,
3863 offsetof(struct udphdr,
3864 check)))
3865 err = -EPROTO;
3866 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
3867 }
3868
3869 return ERR_PTR(-EPROTO);
3870}
3871
Paul Durranted1f50c2014-01-09 10:02:46 +00003872/* This value should be large enough to cover a tagged ethernet header plus
3873 * maximally sized IP and TCP or UDP headers.
3874 */
3875#define MAX_IP_HDR_LEN 128
3876
Jan Beulichf9708b42014-03-11 13:56:05 +00003877static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
Paul Durranted1f50c2014-01-09 10:02:46 +00003878{
3879 unsigned int off;
3880 bool fragment;
Jan Beulichf9708b42014-03-11 13:56:05 +00003881 __sum16 *csum;
Paul Durranted1f50c2014-01-09 10:02:46 +00003882 int err;
3883
3884 fragment = false;
3885
3886 err = skb_maybe_pull_tail(skb,
3887 sizeof(struct iphdr),
3888 MAX_IP_HDR_LEN);
3889 if (err < 0)
3890 goto out;
3891
3892 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
3893 fragment = true;
3894
3895 off = ip_hdrlen(skb);
3896
3897 err = -EPROTO;
3898
3899 if (fragment)
3900 goto out;
3901
Jan Beulichf9708b42014-03-11 13:56:05 +00003902 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
3903 if (IS_ERR(csum))
3904 return PTR_ERR(csum);
Paul Durranted1f50c2014-01-09 10:02:46 +00003905
Jan Beulichf9708b42014-03-11 13:56:05 +00003906 if (recalculate)
3907 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3908 ip_hdr(skb)->daddr,
3909 skb->len - off,
3910 ip_hdr(skb)->protocol, 0);
Paul Durranted1f50c2014-01-09 10:02:46 +00003911 err = 0;
3912
3913out:
3914 return err;
3915}
3916
3917/* This value should be large enough to cover a tagged ethernet header plus
3918 * an IPv6 header, all options, and a maximal TCP or UDP header.
3919 */
3920#define MAX_IPV6_HDR_LEN 256
3921
3922#define OPT_HDR(type, skb, off) \
3923 (type *)(skb_network_header(skb) + (off))
3924
3925static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
3926{
3927 int err;
3928 u8 nexthdr;
3929 unsigned int off;
3930 unsigned int len;
3931 bool fragment;
3932 bool done;
Jan Beulichf9708b42014-03-11 13:56:05 +00003933 __sum16 *csum;
Paul Durranted1f50c2014-01-09 10:02:46 +00003934
3935 fragment = false;
3936 done = false;
3937
3938 off = sizeof(struct ipv6hdr);
3939
3940 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
3941 if (err < 0)
3942 goto out;
3943
3944 nexthdr = ipv6_hdr(skb)->nexthdr;
3945
3946 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
3947 while (off <= len && !done) {
3948 switch (nexthdr) {
3949 case IPPROTO_DSTOPTS:
3950 case IPPROTO_HOPOPTS:
3951 case IPPROTO_ROUTING: {
3952 struct ipv6_opt_hdr *hp;
3953
3954 err = skb_maybe_pull_tail(skb,
3955 off +
3956 sizeof(struct ipv6_opt_hdr),
3957 MAX_IPV6_HDR_LEN);
3958 if (err < 0)
3959 goto out;
3960
3961 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
3962 nexthdr = hp->nexthdr;
3963 off += ipv6_optlen(hp);
3964 break;
3965 }
3966 case IPPROTO_AH: {
3967 struct ip_auth_hdr *hp;
3968
3969 err = skb_maybe_pull_tail(skb,
3970 off +
3971 sizeof(struct ip_auth_hdr),
3972 MAX_IPV6_HDR_LEN);
3973 if (err < 0)
3974 goto out;
3975
3976 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
3977 nexthdr = hp->nexthdr;
3978 off += ipv6_authlen(hp);
3979 break;
3980 }
3981 case IPPROTO_FRAGMENT: {
3982 struct frag_hdr *hp;
3983
3984 err = skb_maybe_pull_tail(skb,
3985 off +
3986 sizeof(struct frag_hdr),
3987 MAX_IPV6_HDR_LEN);
3988 if (err < 0)
3989 goto out;
3990
3991 hp = OPT_HDR(struct frag_hdr, skb, off);
3992
3993 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
3994 fragment = true;
3995
3996 nexthdr = hp->nexthdr;
3997 off += sizeof(struct frag_hdr);
3998 break;
3999 }
4000 default:
4001 done = true;
4002 break;
4003 }
4004 }
4005
4006 err = -EPROTO;
4007
4008 if (!done || fragment)
4009 goto out;
4010
Jan Beulichf9708b42014-03-11 13:56:05 +00004011 csum = skb_checksum_setup_ip(skb, nexthdr, off);
4012 if (IS_ERR(csum))
4013 return PTR_ERR(csum);
Paul Durranted1f50c2014-01-09 10:02:46 +00004014
Jan Beulichf9708b42014-03-11 13:56:05 +00004015 if (recalculate)
4016 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4017 &ipv6_hdr(skb)->daddr,
4018 skb->len - off, nexthdr, 0);
Paul Durranted1f50c2014-01-09 10:02:46 +00004019 err = 0;
4020
4021out:
4022 return err;
4023}
4024
4025/**
4026 * skb_checksum_setup - set up partial checksum offset
4027 * @skb: the skb to set up
4028 * @recalculate: if true the pseudo-header checksum will be recalculated
4029 */
4030int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4031{
4032 int err;
4033
4034 switch (skb->protocol) {
4035 case htons(ETH_P_IP):
Jan Beulichf9708b42014-03-11 13:56:05 +00004036 err = skb_checksum_setup_ipv4(skb, recalculate);
Paul Durranted1f50c2014-01-09 10:02:46 +00004037 break;
4038
4039 case htons(ETH_P_IPV6):
4040 err = skb_checksum_setup_ipv6(skb, recalculate);
4041 break;
4042
4043 default:
4044 err = -EPROTO;
4045 break;
4046 }
4047
4048 return err;
4049}
4050EXPORT_SYMBOL(skb_checksum_setup);
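
/* Example (illustrative sketch): a virtualisation backend transmitting a
 * guest packet whose checksum state is untrusted could call
 * skb_checksum_setup(), recalculating the pseudo-header checksum only
 * when the guest did not supply one. The flag name is hypothetical.
 */
static int __maybe_unused example_setup_guest_csum(struct sk_buff *skb,
						   bool guest_csum_ok)
{
	return skb_checksum_setup(skb, !guest_csum_ok);
}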
4051
Ben Hutchings4497b072008-06-19 16:22:28 -07004052void __skb_warn_lro_forwarding(const struct sk_buff *skb)
4053{
Joe Perchese87cc472012-05-13 21:56:26 +00004054 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
4055 skb->dev->name);
Ben Hutchings4497b072008-06-19 16:22:28 -07004056}
Ben Hutchings4497b072008-06-19 16:22:28 -07004057EXPORT_SYMBOL(__skb_warn_lro_forwarding);
Eric Dumazetbad43ca2012-05-19 03:02:02 +00004058
4059void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
4060{
Eric Dumazet3d861f62012-10-22 09:03:40 +00004061 if (head_stolen) {
4062 skb_release_head_state(skb);
Eric Dumazetbad43ca2012-05-19 03:02:02 +00004063 kmem_cache_free(skbuff_head_cache, skb);
Eric Dumazet3d861f62012-10-22 09:03:40 +00004064 } else {
Eric Dumazetbad43ca2012-05-19 03:02:02 +00004065 __kfree_skb(skb);
Eric Dumazet3d861f62012-10-22 09:03:40 +00004066 }
Eric Dumazetbad43ca2012-05-19 03:02:02 +00004067}
4068EXPORT_SYMBOL(kfree_skb_partial);
4069
4070/**
4071 * skb_try_coalesce - try to merge skb to prior one
4072 * @to: prior buffer
4073 * @from: buffer to add
4074 * @fragstolen: pointer to boolean, set to true if @from's head was stolen as a page frag
Randy Dunlapc6c4b972012-06-08 14:01:44 +00004075 * @delta_truesize: how much more was allocated than was requested
Eric Dumazetbad43ca2012-05-19 03:02:02 +00004076 */
4077bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
4078 bool *fragstolen, int *delta_truesize)
4079{
4080 int i, delta, len = from->len;
4081
4082 *fragstolen = false;
4083
4084 if (skb_cloned(to))
4085 return false;
4086
4087 if (len <= skb_tailroom(to)) {
Eric Dumazete93a0432014-09-15 04:19:52 -07004088 if (len)
4089 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
Eric Dumazetbad43ca2012-05-19 03:02:02 +00004090 *delta_truesize = 0;
4091 return true;
4092 }
4093
4094 if (skb_has_frag_list(to) || skb_has_frag_list(from))
4095 return false;
4096
4097 if (skb_headlen(from) != 0) {
4098 struct page *page;
4099 unsigned int offset;
4100
4101 if (skb_shinfo(to)->nr_frags +
4102 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
4103 return false;
4104
4105 if (skb_head_is_locked(from))
4106 return false;
4107
4108 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4109
4110 page = virt_to_head_page(from->head);
4111 offset = from->data - (unsigned char *)page_address(page);
4112
4113 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
4114 page, offset, skb_headlen(from));
4115 *fragstolen = true;
4116 } else {
4117 if (skb_shinfo(to)->nr_frags +
4118 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
4119 return false;
4120
Weiping Panf4b549a2012-09-28 20:15:30 +00004121 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
Eric Dumazetbad43ca2012-05-19 03:02:02 +00004122 }
4123
4124 WARN_ON_ONCE(delta < len);
4125
4126 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
4127 skb_shinfo(from)->frags,
4128 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
4129 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
4130
4131 if (!skb_cloned(from))
4132 skb_shinfo(from)->nr_frags = 0;
4133
Li RongQing8ea853f2012-09-18 16:53:21 +00004134 /* if the skb is not cloned this does nothing
4135 * since we set nr_frags to 0.
4136 */
Eric Dumazetbad43ca2012-05-19 03:02:02 +00004137 for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
4138 skb_frag_ref(from, i);
4139
4140 to->truesize += delta;
4141 to->len += len;
4142 to->data_len += len;
4143
4144 *delta_truesize = delta;
4145 return true;
4146}
4147EXPORT_SYMBOL(skb_try_coalesce);
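
/* Example (illustrative sketch): the usual pairing of skb_try_coalesce()
 * with kfree_skb_partial() when appending a buffer to the tail of a
 * receive queue; memory accounting is elided and the helper name is
 * hypothetical.
 */
static bool __maybe_unused example_coalesce_tail(struct sk_buff *tail,
						 struct sk_buff *skb)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(tail, skb, &fragstolen, &delta))
		return false;

	/* @skb is now merged into @tail; if its head was stolen as a page
	 * frag only the sk_buff itself is freed here
	 */
	kfree_skb_partial(skb, fragstolen);
	return true;
}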
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02004148
4149/**
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02004150 * skb_scrub_packet - scrub an skb
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02004151 *
4152 * @skb: buffer to clean
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02004153 * @xnet: packet is crossing netns
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02004154 *
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02004155 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
4156 * into/from a tunnel. Some information has to be cleared during these
4157 * operations.
4158 * skb_scrub_packet can also be used to clean an skb before injecting it into
4159 * another namespace (@xnet == true). We have to clear all information in the
4160 * skb that could impact namespace isolation.
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02004161 */
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02004162void skb_scrub_packet(struct sk_buff *skb, bool xnet)
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02004163{
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02004164 if (xnet)
4165 skb_orphan(skb);
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02004166 skb->tstamp.tv64 = 0;
4167 skb->pkt_type = PACKET_HOST;
4168 skb->skb_iif = 0;
WANG Cong60ff7462014-05-04 16:39:18 -07004169 skb->ignore_df = 0;
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02004170 skb_dst_drop(skb);
4171 skb->mark = 0;
Thomas Grafb8fb4e02014-12-23 01:13:18 +01004172 skb_init_secmark(skb);
Nicolas Dichtel621e84d2013-06-26 16:11:27 +02004173 secpath_reset(skb);
4174 nf_reset(skb);
4175 nf_reset_trace(skb);
4176}
4177EXPORT_SYMBOL_GPL(skb_scrub_packet);
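
/* Example (illustrative sketch): a tunnel moving a packet between network
 * namespaces scrubs it first, passing @xnet == true only when the source
 * and destination namespaces actually differ. The helper is hypothetical.
 */
static void __maybe_unused example_scrub_for_xmit(struct sk_buff *skb,
						  struct net *src_net,
						  struct net *dst_net)
{
	skb_scrub_packet(skb, !net_eq(src_net, dst_net));
}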
Florian Westphalde960aa2014-01-26 10:58:16 +01004178
4179/**
4180 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
4181 *
4182 * @skb: GSO skb
4183 *
4184 * skb_gso_transport_seglen is used to determine the real size of the
4185 * individual segments, including Layer4 headers (TCP/UDP).
4186 *
4187 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4188 */
4189unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4190{
4191 const struct skb_shared_info *shinfo = skb_shinfo(skb);
Florian Westphalf993bc22014-10-20 13:49:18 +02004192 unsigned int thlen = 0;
Florian Westphalde960aa2014-01-26 10:58:16 +01004193
Florian Westphalf993bc22014-10-20 13:49:18 +02004194 if (skb->encapsulation) {
4195 thlen = skb_inner_transport_header(skb) -
4196 skb_transport_header(skb);
Florian Westphal6d39d582014-04-09 10:28:50 +02004197
Florian Westphalf993bc22014-10-20 13:49:18 +02004198 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4199 thlen += inner_tcp_hdrlen(skb);
4200 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4201 thlen = tcp_hdrlen(skb);
4202 }
Florian Westphal6d39d582014-04-09 10:28:50 +02004203 /* UFO sets gso_size to the size of the fragmentation
4204 * payload, i.e. the size of the L4 (UDP) header is already
4205 * accounted for.
4206 */
Florian Westphalf993bc22014-10-20 13:49:18 +02004207 return thlen + shinfo->gso_size;
Florian Westphalde960aa2014-01-26 10:58:16 +01004208}
4209EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
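
/* Example (illustrative sketch): using skb_gso_transport_seglen() to check
 * whether each segment of a GSO packet would fit a transport-level limit;
 * network and MAC header lengths would have to be added separately. The
 * helper is hypothetical.
 */
static bool __maybe_unused example_gso_segs_fit(const struct sk_buff *skb,
						unsigned int l4_limit)
{
	return skb_gso_transport_seglen(skb) <= l4_limit;
}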
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04004210
4211static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4212{
4213 if (skb_cow(skb, skb_headroom(skb)) < 0) {
4214 kfree_skb(skb);
4215 return NULL;
4216 }
4217
4218 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
4219 skb->mac_header += VLAN_HLEN;
4220 return skb;
4221}
4222
4223struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
4224{
4225 struct vlan_hdr *vhdr;
4226 u16 vlan_tci;
4227
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004228 if (unlikely(skb_vlan_tag_present(skb))) {
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04004229 /* vlan_tci is already set up, so leave this for another time */
4230 return skb;
4231 }
4232
4233 skb = skb_share_check(skb, GFP_ATOMIC);
4234 if (unlikely(!skb))
4235 goto err_free;
4236
4237 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
4238 goto err_free;
4239
4240 vhdr = (struct vlan_hdr *)skb->data;
4241 vlan_tci = ntohs(vhdr->h_vlan_TCI);
4242 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
4243
4244 skb_pull_rcsum(skb, VLAN_HLEN);
4245 vlan_set_encap_proto(skb, vhdr);
4246
4247 skb = skb_reorder_vlan_header(skb);
4248 if (unlikely(!skb))
4249 goto err_free;
4250
4251 skb_reset_network_header(skb);
4252 skb_reset_transport_header(skb);
4253 skb_reset_mac_len(skb);
4254
4255 return skb;
4256
4257err_free:
4258 kfree_skb(skb);
4259 return NULL;
4260}
4261EXPORT_SYMBOL(skb_vlan_untag);
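
/* Example (illustrative sketch): a receive path whose hardware left the
 * VLAN tag in the payload can normalise the skb before protocol demux;
 * skb_vlan_untag() frees the buffer and returns NULL on failure. The
 * helper is hypothetical.
 */
static struct sk_buff * __maybe_unused example_rx_untag(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_8021Q) ||
	    skb->protocol == htons(ETH_P_8021AD))
		skb = skb_vlan_untag(skb);
	return skb;	/* may be NULL */
}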
Eric Dumazet2e4e4412014-09-17 04:49:49 -07004262
Jiri Pirkoe2195122014-11-19 14:05:01 +01004263int skb_ensure_writable(struct sk_buff *skb, int write_len)
4264{
4265 if (!pskb_may_pull(skb, write_len))
4266 return -ENOMEM;
4267
4268 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
4269 return 0;
4270
4271 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4272}
4273EXPORT_SYMBOL(skb_ensure_writable);
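
/* Example (illustrative sketch): making the IPv4 header private and linear
 * before mangling it, here a TTL decrement with an incremental checksum
 * fix-up in the style of ip_decrease_ttl(). The helper is hypothetical.
 */
static int __maybe_unused example_decrement_ttl(struct sk_buff *skb)
{
	struct iphdr *iph;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				       sizeof(struct iphdr));
	if (err)
		return err;

	iph = ip_hdr(skb);
	csum_replace2(&iph->check, htons(iph->ttl << 8),
		      htons((iph->ttl - 1) << 8));
	iph->ttl--;
	return 0;
}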
4274
Jiri Pirko93515d52014-11-19 14:05:02 +01004275/* remove VLAN header from packet and update csum accordingly. */
4276static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
4277{
4278 struct vlan_hdr *vhdr;
4279 unsigned int offset = skb->data - skb_mac_header(skb);
4280 int err;
4281
4282 __skb_push(skb, offset);
4283 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
4284 if (unlikely(err))
4285 goto pull;
4286
4287 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
4288
4289 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
4290 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
4291
4292 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
4293 __skb_pull(skb, VLAN_HLEN);
4294
4295 vlan_set_encap_proto(skb, vhdr);
4296 skb->mac_header += VLAN_HLEN;
4297
4298 if (skb_network_offset(skb) < ETH_HLEN)
4299 skb_set_network_header(skb, ETH_HLEN);
4300
4301 skb_reset_mac_len(skb);
4302pull:
4303 __skb_pull(skb, offset);
4304
4305 return err;
4306}
4307
4308int skb_vlan_pop(struct sk_buff *skb)
4309{
4310 u16 vlan_tci;
4311 __be16 vlan_proto;
4312 int err;
4313
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004314 if (likely(skb_vlan_tag_present(skb))) {
Jiri Pirko93515d52014-11-19 14:05:02 +01004315 skb->vlan_tci = 0;
4316 } else {
4317 if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
4318 skb->protocol != htons(ETH_P_8021AD)) ||
4319 skb->len < VLAN_ETH_HLEN))
4320 return 0;
4321
4322 err = __skb_vlan_pop(skb, &vlan_tci);
4323 if (err)
4324 return err;
4325 }
4326 /* move the next vlan tag, if any, into the hw accel tag */
4327 if (likely((skb->protocol != htons(ETH_P_8021Q) &&
4328 skb->protocol != htons(ETH_P_8021AD)) ||
4329 skb->len < VLAN_ETH_HLEN))
4330 return 0;
4331
4332 vlan_proto = skb->protocol;
4333 err = __skb_vlan_pop(skb, &vlan_tci);
4334 if (unlikely(err))
4335 return err;
4336
4337 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4338 return 0;
4339}
4340EXPORT_SYMBOL(skb_vlan_pop);
4341
4342int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
4343{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004344 if (skb_vlan_tag_present(skb)) {
Jiri Pirko93515d52014-11-19 14:05:02 +01004345 unsigned int offset = skb->data - skb_mac_header(skb);
4346 int err;
4347
4348 /* __vlan_insert_tag expects skb->data to point to the mac header.
4349 * So change skb->data before calling it and change it back to
4350 * the original position later.
4351 */
4352 __skb_push(skb, offset);
4353 err = __vlan_insert_tag(skb, skb->vlan_proto,
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004354 skb_vlan_tag_get(skb));
Jiri Pirko93515d52014-11-19 14:05:02 +01004355 if (err)
4356 return err;
4357 skb->protocol = skb->vlan_proto;
4358 skb->mac_len += VLAN_HLEN;
4359 __skb_pull(skb, offset);
4360
4361 if (skb->ip_summed == CHECKSUM_COMPLETE)
4362 skb->csum = csum_add(skb->csum, csum_partial(skb->data
4363 + (2 * ETH_ALEN), VLAN_HLEN, 0));
4364 }
4365 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4366 return 0;
4367}
4368EXPORT_SYMBOL(skb_vlan_push);
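
/* Example (illustrative sketch): a flow action rewriting the outermost
 * VLAN tag by popping it and pushing a replacement, keeping the 802.1Q
 * TPID. The helper is hypothetical.
 */
static int __maybe_unused example_rewrite_vlan(struct sk_buff *skb,
					       u16 new_tci)
{
	int err = skb_vlan_pop(skb);

	if (unlikely(err))
		return err;
	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
}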
4369
Eric Dumazet2e4e4412014-09-17 04:49:49 -07004370/**
4371 * alloc_skb_with_frags - allocate skb with page frags
4372 *
Masanari Iidade3f0d02014-10-09 12:58:08 +09004373 * @header_len: size of linear part
4374 * @data_len: needed length in frags
4375 * @max_page_order: max page order desired.
4376 * @errcode: pointer to error code if any
4377 * @gfp_mask: allocation mask
Eric Dumazet2e4e4412014-09-17 04:49:49 -07004378 *
4379 * This can be used to allocate a paged skb, given a maximal order for frags.
4380 */
4381struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
4382 unsigned long data_len,
4383 int max_page_order,
4384 int *errcode,
4385 gfp_t gfp_mask)
4386{
4387 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
4388 unsigned long chunk;
4389 struct sk_buff *skb;
4390 struct page *page;
4391 gfp_t gfp_head;
4392 int i;
4393
4394 *errcode = -EMSGSIZE;
4395 /* Note this test could be relaxed if we succeed in allocating
4396 * high order pages...
4397 */
4398 if (npages > MAX_SKB_FRAGS)
4399 return NULL;
4400
4401 gfp_head = gfp_mask;
4402 if (gfp_head & __GFP_WAIT)
4403 gfp_head |= __GFP_REPEAT;
4404
4405 *errcode = -ENOBUFS;
4406 skb = alloc_skb(header_len, gfp_head);
4407 if (!skb)
4408 return NULL;
4409
4410 skb->truesize += npages << PAGE_SHIFT;
4411
4412 for (i = 0; npages > 0; i++) {
4413 int order = max_page_order;
4414
4415 while (order) {
4416 if (npages >= 1 << order) {
4417 page = alloc_pages(gfp_mask |
4418 __GFP_COMP |
4419 __GFP_NOWARN |
4420 __GFP_NORETRY,
4421 order);
4422 if (page)
4423 goto fill_page;
4424 /* Do not retry other high order allocations */
4425 order = 1;
4426 max_page_order = 0;
4427 }
4428 order--;
4429 }
4430 page = alloc_page(gfp_mask);
4431 if (!page)
4432 goto failure;
4433fill_page:
4434 chunk = min_t(unsigned long, data_len,
4435 PAGE_SIZE << order);
4436 skb_fill_page_desc(skb, i, page, 0, chunk);
4437 data_len -= chunk;
4438 npages -= 1 << order;
4439 }
4440 return skb;
4441
4442failure:
4443 kfree_skb(skb);
4444 return NULL;
4445}
4446EXPORT_SYMBOL(alloc_skb_with_frags);
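
/* Example (illustrative sketch): allocating a mostly paged skb in the
 * style of a datagram send path: a small linear area for headers and the
 * payload in (possibly high-order) page frags. The helper and the
 * 128-byte linear size are hypothetical.
 */
static struct sk_buff * __maybe_unused example_alloc_paged_skb(unsigned long data_len,
							       int *errcode)
{
	return alloc_skb_with_frags(128, data_len, PAGE_ALLOC_COSTLY_ORDER,
				    errcode, GFP_KERNEL);
}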