/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

/* State value stored in 'failover_pkts'
 */
#define FIRST_FAILOVER 0xffffu

/* Link FSM states and events:
 */
enum {
	TIPC_LINK_WORKING,
	TIPC_LINK_PROBING,
	TIPC_LINK_RESETTING,
	TIPC_LINK_ESTABLISHING
};

enum {
	PEER_RESET_EVT = RESET_MSG,
	ACTIVATE_EVT = ACTIVATE_MSG,
	TRAFFIC_EVT,	/* Any other valid msg from peer */
	SILENCE_EVT	/* Peer was silent during last timer interval */
};

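/* Informal sketch of the main transitions handled by tipc_link_fsm_evt()
 * below; a reading aid only, not an exhaustive table:
 *
 *   WORKING      + SILENCE_EVT      -> PROBING (send probe)
 *   PROBING      + TRAFFIC/ACTIVATE -> WORKING
 *   PROBING      + SILENCE_EVT      -> probe again, or reset the link once
 *                                      abort_limit is exceeded
 *   RESETTING    + ACTIVATE_EVT     -> WORKING (link up)
 *   RESETTING    + PEER_RESET_EVT   -> ESTABLISHING (send ACTIVATE_MSG)
 *   ESTABLISHING + TRAFFIC/ACTIVATE -> WORKING (link up)
 */
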
/* Link FSM state checking routines
 */
static int link_working(struct tipc_link *l)
{
	return l->state == TIPC_LINK_WORKING;
}

static int link_probing(struct tipc_link *l)
{
	return l->state == TIPC_LINK_PROBING;
}

static int link_resetting(struct tipc_link *l)
{
	return l->state == TIPC_LINK_RESETTING;
}

static int link_establishing(struct tipc_link *l)
{
	return l->state == TIPC_LINK_ESTABLISHING;
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
					   struct sk_buff_head *xmitq);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
static int tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);

/*
 *  Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	if (node_active_link(n, 0) != l)
		return node_active_link(n, 0);
	return node_active_link(n, 1);
}

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working(l_ptr) || link_probing(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *namedq)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity].link) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->peer_session = WILDCARD_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	l_ptr->tolerance = b_ptr->tolerance;
	l_ptr->snd_nxt = 1;
	l_ptr->rcv_nxt = 1;
	l_ptr->state = TIPC_LINK_RESETTING;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	l_ptr->advertised_mtu = b_ptr->mtu;
	l_ptr->mtu = l_ptr->advertised_mtu;
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
	l_ptr->snd_nxt = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	l_ptr->inputq = inputq;
	l_ptr->namedq = namedq;
	skb_queue_head_init(l_ptr->inputq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	return l_ptr;
}

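/* Example (illustrative): with own address 1.1.1, bearer interface "eth0"
 * and peer address 1.1.2, the name composed above becomes
 * "1.1.1:eth0-1.1.2:unknown"; the peer interface part remains "unknown"
 * until a reset/activate message from the peer supplies it.
 */
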
/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
					   struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct sk_buff_head list;
	u16 last_sent;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return;
	last_sent = tipc_bclink_get_last_sent(l->owner->net);
	msg_set_last_bcast(buf_msg(skb), last_sent);
	__skb_queue_head_init(&list);
	__skb_queue_tail(&list, skb);
	tipc_link_xmit(l, &list, xmitq);
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 * @xmitq: queue to prepend created protocol message, if any
 */
static int tipc_link_fsm_evt(struct tipc_link *l, int evt,
			     struct sk_buff_head *xmitq)
{
	int mtyp = 0, rc = 0;
	struct tipc_link *pl;
	enum {
		LINK_RESET = 1,
		LINK_ACTIVATE = (1 << 1),
		SND_PROBE = (1 << 2),
		SND_STATE = (1 << 3),
		SND_RESET = (1 << 4),
		SND_ACTIVATE = (1 << 5),
		SND_BCAST_SYNC = (1 << 6)
	} actions = 0;

	if (l->exec_mode == TIPC_LINK_BLOCKED)
		return rc;

	switch (l->state) {
	case TIPC_LINK_WORKING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			break;
		case SILENCE_EVT:
			l->state = TIPC_LINK_PROBING;
			actions |= SND_PROBE;
			break;
		case PEER_RESET_EVT:
			actions |= LINK_RESET | SND_ACTIVATE;
			break;
		default:
			pr_debug("%s%u WORKING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_PROBING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			l->state = TIPC_LINK_WORKING;
			break;
		case PEER_RESET_EVT:
			actions |= LINK_RESET | SND_ACTIVATE;
			break;
		case SILENCE_EVT:
			if (l->silent_intv_cnt <= l->abort_limit) {
				actions |= SND_PROBE;
				break;
			}
			actions |= LINK_RESET | SND_RESET;
			break;
		default:
			pr_err("%s%u PROBING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_RESETTING:
		switch (evt) {
		case TRAFFIC_EVT:
			break;
		case ACTIVATE_EVT:
			pl = node_active_link(l->owner, 0);
			if (pl && link_probing(pl))
				break;
			l->state = TIPC_LINK_WORKING;
			actions |= LINK_ACTIVATE;
			if (!l->owner->working_links)
				actions |= SND_BCAST_SYNC;
			break;
		case PEER_RESET_EVT:
			l->state = TIPC_LINK_ESTABLISHING;
			actions |= SND_ACTIVATE;
			break;
		case SILENCE_EVT:
			actions |= SND_RESET;
			break;
		default:
			pr_err("%s%u in RESETTING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_ESTABLISHING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			pl = node_active_link(l->owner, 0);
			if (pl && link_probing(pl))
				break;
			l->state = TIPC_LINK_WORKING;
			actions |= LINK_ACTIVATE;
			if (!l->owner->working_links)
				actions |= SND_BCAST_SYNC;
			break;
		case PEER_RESET_EVT:
			break;
		case SILENCE_EVT:
			actions |= SND_ACTIVATE;
			break;
		default:
			pr_err("%s%u ESTABLISHING\n", link_unk_evt, evt);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l->state, evt);
	}

	/* Perform actions as decided by FSM */
	if (actions & LINK_RESET) {
		l->exec_mode = TIPC_LINK_BLOCKED;
		rc |= TIPC_LINK_DOWN_EVT;
	}
	if (actions & LINK_ACTIVATE) {
		l->exec_mode = TIPC_LINK_OPEN;
		rc |= TIPC_LINK_UP_EVT;
	}
	if (actions & (SND_STATE | SND_PROBE))
		mtyp = STATE_MSG;
	if (actions & SND_RESET)
		mtyp = RESET_MSG;
	if (actions & SND_ACTIVATE)
		mtyp = ACTIVATE_MSG;
	if (actions & (SND_PROBE | SND_STATE | SND_RESET | SND_ACTIVATE))
		tipc_link_build_proto_msg(l, mtyp, actions & SND_PROBE,
					  0, 0, 0, xmitq);
	if (actions & SND_BCAST_SYNC)
		tipc_link_build_bcast_sync_msg(l, xmitq);
	return rc;
}

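/* Worked example (illustrative): a link in TIPC_LINK_PROBING that gets
 * SILENCE_EVT while silent_intv_cnt is still within abort_limit only
 * schedules another probe (SND_PROBE -> STATE_MSG with the probe bit set,
 * appended to @xmitq).  Once abort_limit is exceeded, LINK_RESET is selected
 * instead: further link execution is blocked (exec_mode = TIPC_LINK_BLOCKED)
 * and TIPC_LINK_DOWN_EVT is returned to the caller.
 */
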
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;

	link_profile_stats(l);
	if (l->silent_intv_cnt)
		rc = tipc_link_fsm_evt(l, SILENCE_EVT, xmitq);
	else if (link_working(l) && tipc_bclink_acks_missing(l->owner))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	l->silent_intv_cnt++;
	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
		l->owner->inputq = l->inputq;
		l->owner->action_flags |= TIPC_MSG_EVT;
	}
}

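/* Congestion flow sketch (illustrative): when a send exceeds the backlog
 * limit for its importance level, link_schedule_user() queues a SOCK_WAKEUP
 * pseudo message on the link's wakeupq, tagged with the blocked chain's size
 * and importance, and returns -ELINKCONG to the sender.  Once acknowledgments
 * have freed enough send-queue space, link_prepare_wakeup() moves as many of
 * these pseudo messages as the limits permit over to the input queue of the
 * owning node, which then wakes up the blocked senders.
 */
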
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

void tipc_link_purge_backlog(struct tipc_link *l)
{
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	struct tipc_node *owner = l_ptr->owner;
	struct tipc_link *pl = tipc_parallel_link(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = WILDCARD_SESSION;

	/* Prepare for renewed mtu size negotiation */
	l_ptr->mtu = l_ptr->advertised_mtu;

	l_ptr->state = TIPC_LINK_RESETTING;

	if ((prev_state == TIPC_LINK_RESETTING) ||
	    (prev_state == TIPC_LINK_ESTABLISHING))
		return;

	if (tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
		l_ptr->exec_mode = TIPC_LINK_BLOCKED;
		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_pkts = FIRST_FAILOVER;
		pl->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_skb = l_ptr->reasm_buf;
	} else {
		kfree_skb(l_ptr->reasm_buf);
	}
	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	tipc_link_purge_backlog(l_ptr);
	l_ptr->reasm_buf = NULL;
	l_ptr->rcv_unacked = 0;
	l_ptr->snd_nxt = 1;
	l_ptr->rcv_nxt = 1;
	l_ptr->silent_intv_cnt = 0;
	l_ptr->stats.recv_info = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code,
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	link->snd_nxt = seqno;
	return 0;
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_last_in = l->owner->bclink.last_in;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}

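/* Typical caller pattern (illustrative sketch, mirroring what
 * tipc_link_build_bcast_sync_msg() above does): build a one-buffer list,
 * let tipc_link_xmit() sort it into transmq/backlogq, then hand the clones
 * collected in the xmit queue to the bearer:
 *
 *	struct sk_buff_head list, xmitq;
 *
 *	__skb_queue_head_init(&list);
 *	__skb_queue_head_init(&xmitq);
 *	__skb_queue_tail(&list, skb);
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	if (!rc && !skb_queue_empty(&xmitq))
 *		tipc_bearer_send(l->owner->net, l->bearer_id,
 *				 __skb_dequeue(&xmitq), &l->media_addr);
 */
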
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Node is locked.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	u16 seqno = link->snd_nxt;
	u16 ack = mod(link->rcv_nxt - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		link->backlog[msg_importance(msg)].len--;
		msg_set_ack(msg, ack);
		msg_set_seqno(msg, seqno);
		seqno = mod(seqno + 1);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
	}
	link->snd_nxt = seqno;
}

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		msg_set_ack(hdr, ack);
		msg_set_seqno(hdr, seqno);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link ");
		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
			msg_user(msg), msg_type(msg), msg_size(msg),
			msg_errcode(msg));
		pr_info("sqno %u, prev: %x, src: %x\n",
			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransm == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransm = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}

static int tipc_link_retransm(struct tipc_link *l, int retransm,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		l->exec_mode = TIPC_LINK_BLOCKED;
		return TIPC_LINK_DOWN_EVT;
	}
	skb_queue_walk(&l->transmq, skb) {
		if (!retransm)
			return 0;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		retransm--;
		l->stats.retransmitted++;
	}
	return 0;
}

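/* Note: both retransmission paths above share the same failure heuristic -
 * if the first packet to be retransmitted keeps the same sequence number over
 * more than 100 consecutive retransmit attempts (stale_count), the peer is
 * considered unreachable; link_retransmit_failure() logs the details and, in
 * tipc_link_retransm(), the link is blocked and TIPC_LINK_DOWN_EVT returned
 * so that the caller can reset it.
 */
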
/* link_synch(): check if all packets arrived before the synch
 * point have been consumed
 * Returns true if the parallel links are synched, otherwise false
 */
static bool link_synch(struct tipc_link *l)
{
	unsigned int post_synch;
	struct tipc_link *pl;

	pl = tipc_parallel_link(l);
	if (pl == l)
		goto synched;

	/* Was last pre-synch packet added to input queue ? */
	if (less_eq(pl->rcv_nxt, l->synch_point))
		return false;

	/* Is it still in the input queue ? */
	post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
	if (skb_queue_len(pl->inputq) > post_synch)
		return false;
synched:
	l->exec_mode = TIPC_LINK_OPEN;
	return true;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
			node->inputq = link->inputq;
			node->action_flags |= TIPC_MSG_EVT;
		}
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		node->namedq = link->namedq;
		skb_queue_tail(link->namedq, skb);
		if (skb_queue_len(link->namedq) == 1)
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 * Node lock must be held
 */
static int tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;
	int rc = 0;

	switch (msg_user(msg)) {
	case TUNNEL_PROTOCOL:
		if (msg_dup(msg)) {
			link->exec_mode = TIPC_LINK_TUNNEL;
			link->synch_point = msg_seqno(msg_get_wrapped(msg));
			kfree_skb(skb);
			break;
		}
		rc |= tipc_link_failover_rcv(link, &skb);
		if (!skb)
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			link->exec_mode = TIPC_LINK_BLOCKED;
			rc |= TIPC_LINK_DOWN_EVT;
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	};
	return rc;
}

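/* Division of labour between the two input functions above:
 * tipc_data_input() consumes messages destined directly for the upper layer
 * (the data importance levels, CONN_MANAGER and NAME_DISTRIBUTOR) and
 * returns false for anything that still needs link-level processing;
 * tipc_link_input() then unwraps those remaining users - bundles are split
 * with tipc_msg_extract(), fragments are reassembled via tipc_buf_append(),
 * and tunnelled failover/duplicate packets are handled before redelivery.
 */
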
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @link: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *arrvq = &l->deferdq;
	struct sk_buff *tmp;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt;
	int rc = 0;

	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
			tipc_link_build_proto_msg(l, STATE_MSG, 0,
						  0, 0, 0, xmitq);
		return rc;
	}

	skb_queue_walk_safe(arrvq, skb, tmp) {
		hdr = buf_msg(skb);

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
			__skb_dequeue(arrvq);
			rc |= tipc_link_proto_rcv(l, skb, xmitq);
			continue;
		}

		if (unlikely(!link_working(l))) {
			rc |= tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
			if (!link_working(l)) {
				kfree_skb(__skb_dequeue(arrvq));
				return rc;
			}
		}

		l->silent_intv_cnt = 0;

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer reception if there is a gap in the sequence */
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		if (unlikely(less(rcv_nxt, seqno))) {
			l->stats.deferred_recv++;
			return rc;
		}

		__skb_dequeue(arrvq);

		/* Drop if packet already received */
		if (unlikely(more(rcv_nxt, seqno))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			return rc;
		}

		/* Synchronize with parallel link if applicable */
		if (unlikely(l->exec_mode == TIPC_LINK_TUNNEL))
			if (!msg_dup(hdr) && !link_synch(l)) {
				kfree_skb(skb);
				return rc;
			}

		/* Packet can be delivered */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (unlikely(!tipc_data_input(l, skb)))
			rc |= tipc_link_input(l, skb);

		/* Ack at regular intervals */
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l->rcv_unacked = 0;
			l->stats.sent_acks++;
			tipc_link_build_proto_msg(l, STATE_MSG,
						  0, 0, 0, 0, xmitq);
		}
	}
	return rc;
}

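/* Sequence handling example (illustrative): with l->rcv_nxt == 10, an
 * arriving packet with seqno 12 stays in the sorted deferdq and bumps
 * stats.deferred_recv; seqno 9 is counted as a duplicate and freed;
 * seqno 10 is delivered, rcv_nxt advances to 11, and after every
 * TIPC_MIN_LINK_WIN deliveries an acknowledging STATE_MSG is queued
 * on @xmitq.
 */
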
Erik Hugne7ae934b2014-07-01 10:22:40 +02001183/**
Allan Stephens8809b252011-10-25 10:44:35 -04001184 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1185 *
1186 * Returns increase in queue length (i.e. 0 or 1)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001187 */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001188u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001189{
Ying Xuebc6fecd2014-11-26 11:41:53 +08001190 struct sk_buff *skb1;
Jon Paul Maloye4bf4f72015-05-14 10:46:14 -04001191 u16 seq_no = buf_seqno(skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001192
1193 /* Empty queue ? */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001194 if (skb_queue_empty(list)) {
1195 __skb_queue_tail(list, skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001196 return 1;
1197 }
1198
1199 /* Last ? */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001200 if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
1201 __skb_queue_tail(list, skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001202 return 1;
1203 }
1204
Allan Stephens8809b252011-10-25 10:44:35 -04001205 /* Locate insertion point in queue, then insert; discard if duplicate */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001206 skb_queue_walk(list, skb1) {
Jon Paul Maloye4bf4f72015-05-14 10:46:14 -04001207 u16 curr_seqno = buf_seqno(skb1);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001208
Allan Stephens8809b252011-10-25 10:44:35 -04001209 if (seq_no == curr_seqno) {
Ying Xuebc6fecd2014-11-26 11:41:53 +08001210 kfree_skb(skb);
Allan Stephens8809b252011-10-25 10:44:35 -04001211 return 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001212 }
Allan Stephens8809b252011-10-25 10:44:35 -04001213
1214 if (less(seq_no, curr_seqno))
Per Lidenb97bf3f2006-01-02 19:04:38 +01001215 break;
Allan Stephens8809b252011-10-25 10:44:35 -04001216 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001217
Ying Xuebc6fecd2014-11-26 11:41:53 +08001218 __skb_queue_before(list, skb1, skb);
Allan Stephens8809b252011-10-25 10:44:35 -04001219 return 1;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001220}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_send(l->owner->net, l->bearer_id, skb, &l->media_addr);
	l->rcv_unacked = 0;
	kfree_skb(skb);
}

/* tipc_link_build_proto_msg: prepare link protocol message for transmission
 */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	u16 snd_nxt = l->snd_nxt;
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcv_last = rcv_nxt - 1;
	int node_up = l->owner->bclink.recv_permitted;

	/* Don't send protocol message during reset or link failover */
	if (l->exec_mode == TIPC_LINK_BLOCKED)
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;
		msg_set_next_sent(hdr, snd_nxt);

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_ack(hdr, rcv_last);
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->failover_checkpt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_head(xmitq, skb);
}
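
/* Illustrative note (added by editor): the U16_MAX / 2 offset above keeps
 * protocol messages out of sequence with the data packet flow, apparently
 * for backwards compatibility, so a peer never mistakes them for in-window
 * data. For example, with snd_nxt == 100 a STATE/RESET/ACTIVATE message
 * carries seqno 100 + 32767 = 32867, far outside the receive window, while
 * the real data flow continues at 100, 101, ...
 */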

/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *skb;
	u32 length = msg_size(msg);

	tunnel = node_active_link(l_ptr->owner, selector & 1);
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
}


/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	int msgcount;
	struct tipc_link *tunnel = node_active_link(l_ptr->owner, 0);
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);

	skb_queue_walk(&l_ptr->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
	}
	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	msgcount = skb_queue_len(&l_ptr->transmq);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->transmq)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (node_active_link(l_ptr->owner, 0) !=
			 node_active_link(l_ptr->owner, 1));

	skb_queue_walk(&l_ptr->transmq, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}

/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;
	int mcnt;
	u16 seqno;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
		      SYNCH_MSG, INT_H_SIZE, link->addr);
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		msg_set_ack(msg, mod(link->rcv_nxt - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		if (!tipc_link_is_up(link))
			return;
	}
	if (queue == &link->backlogq)
		return;
	seqno = link->snd_nxt;
	skb_queue_walk(&link->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), seqno);
		seqno = mod(seqno + 1);
	}
	queue = &link->backlogq;
	goto tunnel_queue;
}
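
/* Editor's note (summary of the code above, not from the original author):
 * the two tunnelling paths differ in intent. tipc_link_failover_send_queue()
 * wraps the failing link's queued packets in FAILOVER_MSG tunnels so nothing
 * is lost when only one link remains, while tipc_link_dup_queue_xmit() wraps
 * copies of a still-working link's transmit and backlog queues in SYNCH_MSG
 * tunnels when a second link comes up, so per-socket ordering is preserved
 * and the copy that arrives last is dropped by the regular sequence check.
 * In both cases msgcnt in the tunnel header tells the peer how many inner
 * packets to expect.
 */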

/* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
 * Owner node is locked.
 */
static int tipc_link_failover_rcv(struct tipc_link *link,
				  struct sk_buff **skb)
{
	struct tipc_msg *msg = buf_msg(*skb);
	struct sk_buff *iskb = NULL;
	struct tipc_link *pl = NULL;
	int bearer_id = msg_bearer_id(msg);
	int pos = 0;
	int rc = 0;

	if (msg_type(msg) != FAILOVER_MSG) {
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
		goto exit;
	}
	if (bearer_id >= MAX_BEARERS)
		goto exit;

	if (bearer_id == link->bearer_id)
		goto exit;

	pl = link->owner->links[bearer_id].link;

	if (link->failover_pkts == FIRST_FAILOVER)
		link->failover_pkts = msg_msgcnt(msg);

	/* Should we expect an inner packet? */
	if (!link->failover_pkts)
		goto exit;

	if (!tipc_msg_extract(*skb, &iskb, &pos)) {
		pr_warn("%sno inner failover pkt\n", link_co_err);
		*skb = NULL;
		goto exit;
	}
	link->failover_pkts--;
	*skb = NULL;

	/* Was this packet already delivered? */
	if (less(buf_seqno(iskb), link->failover_checkpt)) {
		kfree_skb(iskb);
		iskb = NULL;
		goto exit;
	}
	if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
		link->stats.recv_fragments++;
		if (!tipc_buf_append(&link->failover_skb, &iskb) &&
		    !link->failover_skb) {
			link->exec_mode = TIPC_LINK_BLOCKED;
			rc |= TIPC_LINK_DOWN_EVT;
		}
	}
exit:
	if (!link->failover_pkts && pl)
		pl->exec_mode = TIPC_LINK_OPEN;
	kfree_skb(*skb);
	*skb = iskb;
	return rc;
}

/* tipc_link_proto_rcv(): receive link level protocol message.
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane.
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 nacked_gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	char *if_name;
	int rc = 0;

	if (l->exec_mode == TIPC_LINK_BLOCKED)
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (msg_type(hdr)) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */
	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		rc = tipc_link_fsm_evt(l, msg_type(hdr), xmitq);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;
	case STATE_MSG:
		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;
		rc = tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
		if (!tipc_link_is_up(l))
			break;

		/* Has peer sent packets we haven't received yet ? */
		if (more(peers_snd_nxt, l->rcv_nxt))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, msg_ack(hdr));

		/* If NACK, retransmit will now start at right position */
		if (nacked_gap) {
			rc |= tipc_link_retransm(l, nacked_gap, xmitq);
			l->stats.recv_nacks++;
		}
		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}
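
/* Illustrative note (added by editor): on a STATE_MSG the receiver compares
 * the peer's next-to-send value with its own rcv_nxt; any positive
 * difference becomes rcvgap and is echoed back as a NACK so the peer can
 * retransmit. For instance, peers_snd_nxt == 120 and rcv_nxt == 115 gives
 * rcvgap == 5, while the peer's msg_ack() lets this end release packets up
 * to that point from its transmit queue via tipc_link_release_pkts().
 */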

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}
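
/* Illustrative note (added by editor): the backlog limits scale with the
 * chosen window. Assuming win == 50, the per-importance limits work out as:
 *
 *	TIPC_LOW_IMPORTANCE:      50 / 2     = 25
 *	TIPC_MEDIUM_IMPORTANCE:   50         = 50
 *	TIPC_HIGH_IMPORTANCE:     50 / 2 * 3 = 75
 *	TIPC_CRITICAL_IMPORTANCE: 50 * 2     = 100
 *	TIPC_SYSTEM_IMPORTANCE:   max_bulk (MTU-dependent, presumably sized
 *	                          for a full bulk name-table distribution)
 *
 * so more important traffic may queue more packets before the link reports
 * congestion back to the sender.
 */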

/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i].link;
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->snd_nxt;
	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s>:", str, l->name);

	if (link_probing(l))
		pr_cont(":P\n");
	else if (link_establishing(l))
		pr_cont(":E\n");
	else if (link_resetting(l))
		pr_cont(":R\n");
	else if (link_working(l))
		pr_cont(":W\n");
	else
		pr_cont("\n");

	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link->tolerance = tol;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent(),
			 * which means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}
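
/* Editor's note (summary of the code above): the dump is resumable. The
 * netlink core may invoke it several times for one user request, so the
 * current position is parked in cb->args[]: args[0] holds the last fully
 * dumped node address, args[1] the next link index inside that node, and
 * args[2] a done flag. On the next invocation the walk continues from that
 * node via list_for_each_entry_continue_rcu() instead of starting over.
 */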

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_link_find_owner(net, name, &bearer_id);
		if (!node)
			return -EINVAL;

		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_unlock(node);
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_unlock(node);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	}

	return genlmsg_reply(msg.skb, info);
}

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}