/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"

#define MAX_FORWARD_SIZE 1024

static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

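/* Worked example (informational note, not from the original source):
 * align() rounds a length up to the next 4-byte boundary, so
 * align(1) == align(4) == 4 and align(5) == 8. The bundling code below
 * relies on this to keep every inner message 32-bit aligned within a
 * bundle buffer.
 */
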
/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size)
{
	struct sk_buff *skb;
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

	skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

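/* Usage sketch (hypothetical caller, not part of this file): allocate a
 * buffer and initialize a named-message header in place. own_node and
 * dnode are assumed TIPC addresses; the constants mirror how other TIPC
 * code pairs header sizes with message types.
 *
 *	struct sk_buff *skb = tipc_buf_acquire(NAMED_H_SIZE);
 *
 *	if (skb)
 *		tipc_msg_init(own_node, buf_msg(skb), TIPC_LOW_IMPORTANCE,
 *			      TIPC_NAMED_MSG, NAMED_H_SIZE, dnode);
 */
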
struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}

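/* Example (hedged sketch of a caller): create a header-only
 * connection-manager message. dnode, onode, dport and oport are assumed
 * variables supplied by the caller.
 *
 *	struct sk_buff *skb = tipc_msg_create(CONN_MANAGER, CONN_ACK,
 *					      INT_H_SIZE, 0, dnode, onode,
 *					      dport, oport, TIPC_OK);
 */
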
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in: the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
		head = *headbuf = frag;
		*buf = NULL;
		TIPC_SKB_CB(head)->tail = NULL;
		if (skb_is_nonlinear(head)) {
			skb_walk_frags(head, tail) {
				TIPC_SKB_CB(head)->tail = tail;
			}
		} else {
			skb_frag_list_init(head);
		}
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = false;
		if (unlikely(!tipc_msg_validate(head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	pr_warn_ratelimited("Unable to build fragment list\n");
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}

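/* Reassembly sketch (hypothetical receive path, for illustration only):
 * feed arriving fragments into tipc_buf_append() until it returns 1;
 * next_fragment() and deliver() are assumed helpers. On completion the
 * function hands back the head buffer through the second argument and
 * resets the head pointer for the next message.
 *
 *	struct sk_buff *head = NULL;
 *	struct sk_buff *frag;
 *
 *	while ((frag = next_fragment()) != NULL) {
 *		if (tipc_buf_append(&head, &frag))
 *			deliver(frag);
 *	}
 */
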
/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff *skb)
{
	struct tipc_msg *msg;
	int msz, hsz;

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;
	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE || hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	msg = buf_msg(skb);
	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return false;

	msz = msg_size(msg);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = true;
	return true;
}

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: Offset into the user data (not referenced by this routine; the
 *          iterator in @m already carries the position)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
		   int offset, int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;
	int pktno = 1;
	int pktsz;
	int pktrem = pktmax;
	int drem = dsz;
	struct tipc_msg pkthdr;
	struct sk_buff *skb;
	char *pktpos;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz);
		if (unlikely(!skb))
			return -ENOMEM;
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

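/* Fragmentation sketch (hypothetical send path, for illustration only):
 * build a buffer chain from a prepared header and user data, then hand
 * the queue on for transmission. mhdr, m, dsz and mtu are assumed to be
 * set up by the caller.
 *
 *	struct sk_buff_head pktq;
 *	int rc;
 *
 *	__skb_queue_head_init(&pktq);
 *	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktq);
 *	if (rc == dsz)
 *		... pass &pktq to the link transmit routine ...
 */
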
/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @bskb: the buffer to append to ("bundle")
 * @skb: buffer to be appended
 * @mtu: max allowable size for the bundle buffer
 * Consumes the appended buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
{
	struct tipc_msg *bmsg;
	struct tipc_msg *msg = buf_msg(skb);
	unsigned int bsz;
	unsigned int msz = msg_size(msg);
	u32 start, pad;
	u32 max = mtu - INT_H_SIZE;

	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (!bskb)
		return false;
	bmsg = buf_msg(bskb);
	bsz = msg_size(bmsg);
	start = align(bsz);
	pad = start - bsz;

	if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (likely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	kfree_skb(skb);
	return true;
}

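/* Worked example (informational): a MSG_BUNDLER buffer carries an
 * INT_H_SIZE outer header followed by complete inner messages, each
 * starting on a 4-byte boundary. Appending a message to a bundle whose
 * current size is 53 bytes therefore copies it to offset
 * align(53) = 56, with pad = 3 bytes of padding inserted first.
 */
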
/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted.
 *       Returns position of next msg
 * Consumes outer buffer when last packet extracted
 * Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *msg;
	int imsz, offset;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	msg = buf_msg(skb);
	offset = msg_hdr_sz(msg) + *pos;
	if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
		goto none;

	*iskb = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!*iskb))
		goto none;
	skb_pull(*iskb, offset);
	imsz = msg_size(buf_msg(*iskb));
	skb_trim(*iskb, imsz);
	if (unlikely(!tipc_msg_validate(*iskb)))
		goto none;
	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}

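/* Unbundling sketch (hypothetical receive path, for illustration only):
 * walk a bundle and process each inner message in turn; deliver() is an
 * assumed helper. The outer buffer is consumed on the last extraction or
 * on error, so it must not be touched after the loop.
 *
 *	struct sk_buff *iskb;
 *	int pos = 0;
 *
 *	while (tipc_msg_extract(skb, &iskb, &pos))
 *		deliver(iskb);
 */
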
/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @skb: buffer to be appended and replaced by the bundle buffer
 * @mtu: max allowable size for the bundle buffer, inclusive header
 * @dnode: destination node for message. (Not always present in header)
 * Replaces buffer if successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
{
	struct sk_buff *bskb;
	struct tipc_msg *bmsg;
	struct tipc_msg *msg = buf_msg(*skb);
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (msz > (max / 2))
		return false;

	bskb = tipc_buf_acquire(max);
	if (!bskb)
		return false;

	skb_trim(bskb, INT_H_SIZE);
	bmsg = buf_msg(bskb);
	tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
		      INT_H_SIZE, dnode);
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
	tipc_msg_bundle(bskb, *skb, mtu);
	*skb = bskb;
	return true;
}

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_addr: this node's address, set as previous node of the reversed message
 * @buf: buffer containing message to be reversed
 * @dnode: return value: node where to send message after reversal
 * @err: error code to be set in message
 * Consumes buffer on failure
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
		      int err)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_msg ohdr;
	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);

	if (skb_linearize(buf))
		goto exit;
	msg = buf_msg(buf);
	if (msg_dest_droppable(msg))
		goto exit;
	if (msg_errcode(msg))
		goto exit;
	memcpy(&ohdr, msg, msg_hdr_sz(msg));
	msg_set_errcode(msg, err);
	msg_set_origport(msg, msg_destport(&ohdr));
	msg_set_destport(msg, msg_origport(&ohdr));
	msg_set_prevnode(msg, own_addr);
	if (!msg_short(msg)) {
		msg_set_orignode(msg, msg_destnode(&ohdr));
		msg_set_destnode(msg, msg_orignode(&ohdr));
	}
	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
	skb_trim(buf, msg_size(msg));
	skb_orphan(buf);
	*dnode = msg_orignode(&ohdr);
	return true;
exit:
	kfree_skb(buf);
	*dnode = 0;
	return false;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: the applicable net namespace
 * @skb: the buffer containing the message.
 * @dnode: return value: next-hop node, if destination found
 * @err: return value: error code to use, if message to be rejected
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
			  u32 *dnode, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport;
	u32 own_addr = tipc_own_addr(net);

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = -TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	if (msg_reroute_cnt(msg))
		return false;
	*dnode = addr_domain(net, msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
				       msg_nameinst(msg), dnode);
	if (!dport)
		return false;
	msg_incr_reroute_cnt(msg);
	if (*dnode != own_addr)
		msg_set_prevnode(msg, own_addr);
	msg_set_destnode(msg, *dnode);
	msg_set_destport(msg, dport);
	*err = TIPC_OK;
	return true;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 * reassemble the clones into one message
 */
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_sz;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	return frag;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return NULL;
}
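
/* Usage sketch (hypothetical caller, for illustration only): duplicate an
 * outgoing fragment chain so that a local listener receives its own
 * reassembled copy while the original buffers remain queued for
 * transmission. txq and deliver_local() are assumed names.
 *
 *	struct sk_buff *copy = tipc_msg_reassemble(&txq);
 *
 *	if (copy)
 *		deliver_local(copy);
 */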