#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

static DEFINE_SPINLOCK(fou_lock);
static LIST_HEAD(fou_list);

struct fou {
	struct socket *sock;
	u8 protocol;
	u16 port;
	struct udp_offload udp_offloads;
	struct list_head list;
};

struct fou_cfg {
	u16 type;
	u8 protocol;
	struct udp_port_cfg udp_config;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

static void fou_recv_pull(struct sk_buff *skb, size_t len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
}

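/* encap_rcv handler for direct FOU. Returning 1 hands the skb back to the
 * normal UDP receive path (no FOU state on this socket); returning a negative
 * value tells the UDP/IP stack to resubmit the stripped packet with
 * -(return value) as the inner IP protocol number.
 */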
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	fou_recv_pull(skb, sizeof(struct udphdr));

	return -fou->protocol;
}

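/* Handle the GUE remote checksum offload option on the non-GRO receive path.
 * The option carries two 16-bit values, checksum start and checksum offset,
 * both relative to the end of the GUE header. remcsum_adjust() writes the
 * derived checksum into the packet at that offset and returns the delta,
 * which is then folded back into skb->csum.
 */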
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
	__wsum delta;

	if (skb->remcsum_offload) {
		/* Already processed in GRO path */
		skb->remcsum_offload = 0;
		return guehdr;
	}

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
		__skb_checksum_complete(skb);

	delta = remcsum_adjust((void *)guehdr + hdrlen,
			       skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);

	return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}

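/* encap_rcv handler for GUE. Validates the GUE header, processes any private
 * options (currently only remote checksum offload), strips the UDP and GUE
 * headers, and returns the negative of proto_ctype so the inner packet is
 * re-dispatched to the matching IP protocol handler. Control messages are
 * not supported yet and are dropped.
 */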
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
		goto drop;

	hdrlen = sizeof(struct guehdr) + optlen;

	ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

	/* Pull csum through the guehdr now. This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype);
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	return -guehdr->proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}

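/* GRO receive for direct FOU: look up the net_offload for the inner protocol
 * recorded in NAPI_GRO_CB(skb)->proto and delegate aggregation to it.
 */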
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	const struct net_offload **offloads;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}

static int fou_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload *ops;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	udp_tunnel_gro_complete(skb, nhoff);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

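/* GRO-path counterpart of gue_remcsum(): apply the remote checksum offload
 * option while the packet is still being aggregated, using the GRO checksum
 * (NAPI_GRO_CB(skb)->csum) rather than skb->csum, and mark the skb with
 * remcsum_offload so the normal receive path does not apply it a second time.
 */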
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, u8 ipproto)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
	__wsum delta;

	if (skb->remcsum_offload)
		return guehdr;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	/* Pull checksum that will be written */
	if (skb_gro_header_hard(skb, off + plen)) {
		guehdr = skb_gro_header_slow(skb, off + plen, off);
		if (!guehdr)
			return NULL;
	}

	delta = remcsum_adjust((void *)guehdr + hdrlen,
			       NAPI_GRO_CB(skb)->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	skb->remcsum_offload = 1;

	return guehdr;
}

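/* GRO receive for GUE: parse and validate the GUE header, handle the remote
 * checksum offload option, compare the base header and options against other
 * held packets to decide same_flow, then hand off to the inner protocol's
 * gro_receive callback.
 */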
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr;
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen,
						 guehdr->proto_ctype);
			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	flush = 0;

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE headers for equality (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int gue_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen;
	u8 proto;
	int err = -ENOENT;

	proto = guehdr->proto_ctype;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}

static int fou_add_to_port_list(struct fou *fou)
{
	struct fou *fout;

	spin_lock(&fou_lock);
	list_for_each_entry(fout, &fou_list, list) {
		if (fou->port == fout->port) {
			spin_unlock(&fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fou_list);
	spin_unlock(&fou_lock);

	return 0;
}

static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;
	struct sock *sk = sock->sk;

	udp_del_offload(&fou->udp_offloads);

	list_del(&fou->list);

	/* Remove hooks into tunnel socket */
	sk->sk_user_data = NULL;

	sock_release(sock);

	kfree(fou);
}

static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = fou_udp_recv;
	fou->protocol = cfg->protocol;
	fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;
	fou->udp_offloads.ipproto = cfg->protocol;

	return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = gue_udp_recv;
	fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;

	return 0;
}

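/* Create a FOU port: open the UDP socket described by the netlink
 * configuration, install the direct-FOU or GUE encap_rcv handler on it,
 * register the GRO offloads for the port (IPv4 only at this point), and add
 * the entry to the global port list.
 */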
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct fou *fou = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->port = cfg->udp_config.local_udp_port;

	/* Initialize for the fou type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		err = fou_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	case FOU_ENCAP_GUE:
		err = gue_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	udp_sk(sk)->encap_type = 1;
	udp_encap_enable();

	sk->sk_user_data = fou;
	fou->sock = sock;

	inet_inc_convert_csum(sk);

	sk->sk_allocation = GFP_ATOMIC;

	if (cfg->udp_config.family == AF_INET) {
		err = udp_add_offload(&fou->udp_offloads);
		if (err)
			goto error;
	}

	err = fou_add_to_port_list(fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		sock_release(sock);

	return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou *fou;
	u16 port = cfg->udp_config.local_udp_port;
	int err = -EINVAL;

	spin_lock(&fou_lock);
	list_for_each_entry(fou, &fou_list, list) {
		if (fou->port == port) {
			udp_del_offload(&fou->udp_offloads);
			fou_release(fou);
			err = 0;
			break;
		}
	}
	spin_unlock(&fou_lock);

	return err;
}

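/* Generic netlink interface ("fou" family): FOU_CMD_ADD and FOU_CMD_DEL map
 * to fou_create() and fou_destroy(). With a sufficiently recent iproute2 the
 * same attributes are typically driven from userspace, e.g. (illustrative
 * commands, not part of this file):
 *
 *   ip fou add port 5555 ipproto 4      # direct FOU carrying IPIP
 *   ip fou add port 5556 gue            # GUE encapsulation
 *   ip fou del port 5555
 */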
static struct genl_family fou_nl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = FOU_GENL_NAME,
	.version = FOU_GENL_VERSION,
	.maxattr = FOU_ATTR_MAX,
	.netnsok = true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT] = { .type = NLA_U16, },
	[FOU_ATTR_AF] = { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
	[FOU_ATTR_TYPE] = { .type = NLA_U8, },
};

static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		if (family != AF_INET && family != AF_INET6)
			return -EINVAL;

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(&init_net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct fou_cfg cfg;

	parse_nl_config(info, &cfg);

	return fou_destroy(&init_net, &cfg);
}

static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

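/* ip_tunnel_encap_ops helpers: report how many bytes of encapsulation header
 * each mode adds on transmit. Direct FOU adds only the UDP header; GUE adds
 * the UDP header, the base GUE header, and any private options (currently
 * only remote checksum offload).
 */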
size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);

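/* Push the outer UDP header for either encapsulation mode. The destination
 * port comes from the tunnel encap configuration, the source port from the
 * flow hash (or a fixed configured value), and the UDP checksum is set or
 * left to offload according to TUNNEL_ENCAP_FLAG_CSUM. *protocol is rewritten
 * to IPPROTO_UDP so the caller emits a UDP outer packet.
 */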
static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	uh->check = 0;
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 sport;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);
	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(fou_build_header);

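/* Build the GUE transmit header: reserve room for the base header plus any
 * private options, fill in hlen and proto_ctype, and, when remote checksum
 * offload is enabled and the inner packet uses CHECKSUM_PARTIAL, encode the
 * checksum start/offset (relative to the end of the GUE header) in the
 * REMCSUM option instead of computing an outer UDP checksum.
 */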
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	__be16 sport;
	void *data;
	bool need_priv = false;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		csum = false;
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Get source port (based on flow hash) before skb_push */
	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(gue_build_header);

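/* When CONFIG_NET_FOU_IP_TUNNELS is enabled, register the FOU and GUE
 * encapsulation ops with the IP tunnel core so IP tunnels can select them
 * via TUNNEL_ENCAP_FOU / TUNNEL_ENCAP_GUE; otherwise these registration
 * helpers are no-ops.
 */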
#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static int __init fou_init(void)
{
	int ret;

	ret = genl_register_family_with_ops(&fou_nl_family,
					    fou_nl_ops);

	if (ret < 0)
		goto exit;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret < 0)
		genl_unregister_family(&fou_nl_family);

exit:
	return ret;
}

static void __exit fou_fini(void)
{
	struct fou *fou, *next;

	ip_tunnel_encap_del_fou_ops();

	genl_unregister_family(&fou_nl_family);

	/* Close all the FOU sockets */

	spin_lock(&fou_lock);
	list_for_each_entry_safe(fou, next, &fou_list, list)
		fou_release(fou);
	spin_unlock(&fou_lock);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");