/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_info;		/* used in counting # sent packets */
	u32 recv_info;		/* used in counting # recv'd packets */
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @last_retransmitted: sequence number of most recently retransmitted message
 * @stale_count: # of identical retransmit requests made by peer
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u32 peer_session;
	u32 session;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
	} backlog[5];
	u16 snd_nxt;
	u16 last_retransm;
	u16 window;
	u32 stale_count;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	int nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Wildcard value for link session numbers. When it is known that
 * peer endpoint is down, any session number must be accepted.
 */
#define ANY_SESSION 0x10000

/* Link FSM states:
 */
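/* Note: each state below occupies its own bit range, so that groups of
 * states can be tested with a single bitwise AND, as done in link_is_up(),
 * tipc_link_is_reset() and tipc_link_is_blocked() further down.
 */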
enum {
	LINK_ESTABLISHED  = 0xe,
	LINK_ESTABLISHING = 0xe << 4,
	LINK_RESET        = 0x1 << 8,
	LINK_RESETTING    = 0x2 << 12,
	LINK_PEER_RESET   = 0xd << 16,
	LINK_FAILINGOVER  = 0xf << 20,
	LINK_SYNCHING     = 0xc << 24
};

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

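/* The broadcast send link is the only link with a NULL bc_sndlink
 * reference; a broadcast receive link points to itself as bc_rcvlink.
 */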
static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

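/* tipc_link_add_bc_peer - register one more peer that must acknowledge
 * broadcast packets; align its receive link with the current broadcast
 * send position and queue a broadcast init message on the unicast link.
 */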
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

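/* tipc_link_remove_bc_peer - unregister a broadcast receiver peer:
 * acknowledge everything sent so far on its behalf, reset its receive
 * link, and reset the broadcast send link once no acknowledging peers
 * remain.
 */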
void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

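/* link_bc_rcv_gap - number of broadcast packets known to be outstanding,
 * i.e. the gap between the next expected sequence number and either the
 * peer's reported send position or the first deferred packet.
 */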
u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->peer_session = ANY_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);

	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted to be sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = tipc_own_addr(link->net);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}

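/* tipc_link_reset - bring link back to its initial state: purge the
 * transmission, backlog and deferred queues, hand pending wakeup messages
 * over to the input queue, clear sequence numbers and reassembly state,
 * and bump the session number so the peer can tell a new session from
 * the old one.
 */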
void tipc_link_reset(struct tipc_link *l)
{
	l->peer_session = ANY_SESSION;
	l->session++;
	l->mtu = l->advertised_mtu;
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->stats.recv_info = 0;
	l->stale_count = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	if (!skb_queue_empty(backlogq)) {
		for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
			if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
				return link_schedule_user(l, list);
		}
	}
	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}

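/* tipc_link_advance_backlog - move messages from the backlog queue to the
 * transmit queue, as far as the send window allows, and queue clones of
 * them on @xmitq for transmission by the caller.
 */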
void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

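/* link_retransmit_failure - log the offending packet when repeated
 * retransmissions of it have failed, before the caller resets the link.
 */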
static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "Resetting link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

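/* tipc_link_retrans - retransmit the packets in the range [from, to] from
 * the transmit queue onto @xmitq, escalating to link failure if the same
 * packet has had to be retransmitted more than 100 times in a row.
 */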
int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
		      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}

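/* tipc_link_release_pkts - release all packets in the transmit queue up to
 * and including the acknowledged sequence number; returns true if anything
 * was actually released.
 */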
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;

		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
		l->snd_nxt = l->rcv_nxt;
		return TIPC_LINK_SND_STATE;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	return 0;
}

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 */
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;

	if (link_is_bc_rcvlink(l))
		return;

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}

Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001264static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1265 u16 rcvgap, int tolerance, int priority,
1266 struct sk_buff_head *xmitq)
1267{
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001268 struct tipc_link *bcl = l->bc_rcvlink;
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001269 struct sk_buff *skb;
1270 struct tipc_msg *hdr;
1271 struct sk_buff_head *dfq = &l->deferdq;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001272 bool node_up = link_is_up(bcl);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001273 struct tipc_mon_state *mstate = &l->mon_state;
1274 int dlen = 0;
1275 void *data;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001276
1277 /* Don't send protocol message during reset or link failover */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001278 if (tipc_link_is_blocked(l))
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001279 return;
1280
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001281 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1282 return;
1283
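	/* If packets are waiting in the deferred queue, report the gap up
	 * to the first of them
	 */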
1284 if (!skb_queue_empty(dfq))
1285 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1286
1287 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001288 tipc_max_domain_size, l->addr,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001289 tipc_own_addr(l->net), 0, 0, 0);
1290 if (!skb)
1291 return;
1292
1293 hdr = buf_msg(skb);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001294 data = msg_data(hdr);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001295 msg_set_session(hdr, l->session);
1296 msg_set_bearer_id(hdr, l->bearer_id);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001297 msg_set_net_plane(hdr, l->net_plane);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001298 msg_set_next_sent(hdr, l->snd_nxt);
1299 msg_set_ack(hdr, l->rcv_nxt - 1);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001300 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001301 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001302 msg_set_link_tolerance(hdr, tolerance);
1303 msg_set_linkprio(hdr, priority);
1304 msg_set_redundant_link(hdr, node_up);
1305 msg_set_seq_gap(hdr, 0);
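	/* Protocol messages are not sequenced on the link; give them a
	 * seqno well away from the current data packet window
	 */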
Jon Paul Maloy52666982015-10-22 08:51:41 -04001306 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001307
1308 if (mtyp == STATE_MSG) {
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001309 msg_set_seq_gap(hdr, rcvgap);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001310 msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001311 msg_set_probe(hdr, probe);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001312 tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
1313 msg_set_size(hdr, INT_H_SIZE + dlen);
1314 skb_trim(skb, INT_H_SIZE + dlen);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001315 l->stats.sent_states++;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001316 l->rcv_unacked = 0;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001317 } else {
1318 /* RESET_MSG or ACTIVATE_MSG */
1319 msg_set_max_pkt(hdr, l->advertised_mtu);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001320 strcpy(data, l->if_name);
1321 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1322 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001323 }
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001324 if (probe)
1325 l->stats.sent_probes++;
1326 if (rcvgap)
1327 l->stats.sent_nacks++;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001328 skb->priority = TC_PRIO_CONTROL;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001329 __skb_queue_tail(xmitq, skb);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001330}
Per Lidenb97bf3f2006-01-02 19:04:38 +01001331
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001332/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001333 * with contents of the link's transmit and backlog queues.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001334 */
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001335void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1336 int mtyp, struct sk_buff_head *xmitq)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001337{
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001338 struct sk_buff *skb, *tnlskb;
1339 struct tipc_msg *hdr, tnlhdr;
1340 struct sk_buff_head *queue = &l->transmq;
1341 struct sk_buff_head tmpxq, tnlq;
1342 u16 pktlen, pktcnt, seqno = l->snd_nxt;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001343
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001344 if (!tnl)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001345 return;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001346
1347 skb_queue_head_init(&tnlq);
1348 skb_queue_head_init(&tmpxq);
1349
1350 /* At least one packet required for safe algorithm => add dummy */
1351 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001352 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001353 0, 0, TIPC_ERR_NO_PORT);
Ying Xuea6ca1092014-11-26 11:41:55 +08001354 if (!skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001355 pr_warn("%sunable to create tunnel packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001356 return;
Allan Stephens5392d642006-06-25 23:52:50 -07001357 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001358 skb_queue_tail(&tnlq, skb);
1359 tipc_link_xmit(l, &tnlq, &tmpxq);
1360 __skb_queue_purge(&tmpxq);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001361
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001362 /* Initialize reusable tunnel packet header */
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001363 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001364 mtyp, INT_H_SIZE, l->addr);
1365 pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1366 msg_set_msgcnt(&tnlhdr, pktcnt);
1367 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1368tnl:
1369 /* Wrap each packet into a tunnel packet */
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001370 skb_queue_walk(queue, skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001371 hdr = buf_msg(skb);
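		/* Backlog packets have no sequence number yet; assign one now */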
1372 if (queue == &l->backlogq)
1373 msg_set_seqno(hdr, seqno++);
1374 pktlen = msg_size(hdr);
1375 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1376 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
1377 if (!tnlskb) {
1378 pr_warn("%sunable to send packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001379 return;
1380 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001381 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1382 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1383 __skb_queue_tail(&tnlq, tnlskb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001384 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001385 if (queue != &l->backlogq) {
1386 queue = &l->backlogq;
1387 goto tnl;
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -04001388 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001389
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001390 tipc_link_xmit(tnl, &tnlq, xmitq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001391
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001392 if (mtyp == FAILOVER_MSG) {
1393 tnl->drop_point = l->rcv_nxt;
1394 tnl->failover_reasm_skb = l->reasm_buf;
1395 l->reasm_buf = NULL;
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001396 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001397}
1398
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001399/* tipc_link_proto_rcv(): receive link level protocol message
1400 * Note that the network plane id propagates through the network, and may
1401 * change at any time. The node with the lowest numerical id determines
1402 * the network plane.
1403 */
1404static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1405 struct sk_buff_head *xmitq)
1406{
1407 struct tipc_msg *hdr = buf_msg(skb);
1408 u16 rcvgap = 0;
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001409 u16 ack = msg_ack(hdr);
1410 u16 gap = msg_seq_gap(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001411 u16 peers_snd_nxt = msg_next_sent(hdr);
1412 u16 peers_tol = msg_link_tolerance(hdr);
1413 u16 peers_prio = msg_linkprio(hdr);
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001414 u16 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001415 u16 dlen = msg_data_sz(hdr);
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001416 int mtyp = msg_type(hdr);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001417 void *data;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001418 char *if_name;
1419 int rc = 0;
1420
Jon Paul Maloy52666982015-10-22 08:51:41 -04001421 if (tipc_link_is_blocked(l) || !xmitq)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001422 goto exit;
1423
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001424 if (tipc_own_addr(l->net) > msg_prevnode(hdr))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001425 l->net_plane = msg_net_plane(hdr);
1426
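	/* Make sure the whole message is in the linear data area before
	 * header and monitor data are accessed
	 */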
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001427 skb_linearize(skb);
1428 hdr = buf_msg(skb);
1429 data = msg_data(hdr);
1430
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001431 switch (mtyp) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001432 case RESET_MSG:
1433
1434 /* Ignore duplicate RESET with old session number */
1435 if ((less_eq(msg_session(hdr), l->peer_session)) &&
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001436 (l->peer_session != ANY_SESSION))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001437 break;
1438 /* fall thru' */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001439
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001440 case ACTIVATE_MSG:
1441
1442 /* Complete own link name with peer's interface name */
1443 if_name = strrchr(l->name, ':') + 1;
1444 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1445 break;
1446 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1447 break;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001448 strncpy(if_name, data, TIPC_MAX_IF_NAME);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001449
1450 /* Update own tolerance if peer indicates a non-zero value */
1451 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1452 l->tolerance = peers_tol;
1453
1454 /* Update own priority if peer's priority is higher */
1455 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1456 l->priority = peers_prio;
1457
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001458 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001459 if (msg_peer_stopping(hdr))
1460 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1461 else if ((mtyp == RESET_MSG) || !link_is_up(l))
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001462 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1463
1464 /* ACTIVATE_MSG takes up link if it was already locally reset */
1465 if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1466 rc = TIPC_LINK_UP_EVT;
1467
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001468 l->peer_session = msg_session(hdr);
1469 l->peer_bearer_id = msg_bearer_id(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001470 if (l->mtu > msg_max_pkt(hdr))
1471 l->mtu = msg_max_pkt(hdr);
1472 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001473
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001474 case STATE_MSG:
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001475
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001476 /* Update own tolerance if peer indicates a non-zero value */
1477 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1478 l->tolerance = peers_tol;
1479
Richard Alpe81729812016-02-01 08:19:57 +01001480 if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
1481 TIPC_MAX_LINK_PRI)) {
1482 l->priority = peers_prio;
1483 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1484 }
1485
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001486 l->silent_intv_cnt = 0;
1487 l->stats.recv_states++;
1488 if (msg_probe(hdr))
1489 l->stats.recv_probes++;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001490
1491 if (!link_is_up(l)) {
1492 if (l->state == LINK_ESTABLISHING)
1493 rc = TIPC_LINK_UP_EVT;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001494 break;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001495 }
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001496 tipc_mon_rcv(l->net, data, dlen, l->addr,
1497 &l->mon_state, l->bearer_id);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001498
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001499 /* Send NACK if peer has sent pkts we haven't received yet */
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001500 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001501 rcvgap = peers_snd_nxt - l->rcv_nxt;
1502 if (rcvgap || (msg_probe(hdr)))
1503 tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
Jon Paul Maloy16040892015-07-21 06:42:28 -04001504 0, 0, xmitq);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001505 tipc_link_release_pkts(l, ack);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001506
1507 /* If NACK, retransmit will now start at right position */
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001508 if (gap) {
1509 rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001510 l->stats.recv_nacks++;
1511 }
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001512
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001513 tipc_link_advance_backlog(l, xmitq);
1514 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1515 link_prepare_wakeup(l);
1516 }
1517exit:
1518 kfree_skb(skb);
1519 return rc;
1520}
1521
Jon Paul Maloy52666982015-10-22 08:51:41 -04001522/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1523 */
1524static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1525 u16 peers_snd_nxt,
1526 struct sk_buff_head *xmitq)
1527{
1528 struct sk_buff *skb;
1529 struct tipc_msg *hdr;
1530 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1531 u16 ack = l->rcv_nxt - 1;
1532 u16 gap_to = peers_snd_nxt - 1;
1533
1534 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001535 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001536 if (!skb)
1537 return false;
1538 hdr = buf_msg(skb);
1539 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1540 msg_set_bcast_ack(hdr, ack);
1541 msg_set_bcgap_after(hdr, ack);
1542 if (dfrd_skb)
1543 gap_to = buf_seqno(dfrd_skb) - 1;
1544 msg_set_bcgap_to(hdr, gap_to);
1545 msg_set_non_seq(hdr, bcast);
1546 __skb_queue_tail(xmitq, skb);
1547 return true;
1548}
1549
1550/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1551 *
1552 * Give a newly added peer node the sequence number where it should
1553 * start receiving and acking broadcast packets.
1554 */
Wu Fengguang742e0382015-10-24 22:56:01 +08001555static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1556 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001557{
1558 struct sk_buff_head list;
1559
1560 __skb_queue_head_init(&list);
1561 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1562 return;
1563 tipc_link_xmit(l, &list, xmitq);
1564}
1565
1566/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1567 */
1568void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1569{
1570 int mtyp = msg_type(hdr);
1571 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1572
1573 if (link_is_up(l))
1574 return;
1575
1576 if (msg_user(hdr) == BCAST_PROTOCOL) {
1577 l->rcv_nxt = peers_snd_nxt;
1578 l->state = LINK_ESTABLISHED;
1579 return;
1580 }
1581
1582 if (l->peer_caps & TIPC_BCAST_SYNCH)
1583 return;
1584
1585 if (msg_peer_node_is_up(hdr))
1586 return;
1587
1588 /* Compatibility: accept older, less safe initial synch data */
1589 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
1590 l->rcv_nxt = peers_snd_nxt;
1591}
1592
1593/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1594 */
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001595int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1596 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001597{
1598 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001599 u16 from = msg_bcast_ack(hdr) + 1;
1600 u16 to = from + msg_bc_gap(hdr) - 1;
1601 int rc = 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001602
1603 if (!link_is_up(l))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001604 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001605
1606 if (!msg_peer_node_is_up(hdr))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001607 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001608
Jon Paul Maloy2d18ac42016-07-11 16:08:35 -04001609 /* Open when peer acknowledges our bcast init msg (pkt #1) */
1610 if (msg_ack(hdr))
1611 l->bc_peer_is_up = true;
1612
1613 if (!l->bc_peer_is_up)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001614 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001615
1616 /* Ignore if peers_snd_nxt goes beyond receive window */
1617 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001618 return rc;
1619
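	/* Retransmit the broadcast packets the peer reports as missing */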
1620 if (!less(to, from)) {
1621 rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
1622 l->stats.recv_nacks++;
1623 }
1624
1625 l->snd_nxt = peers_snd_nxt;
1626 if (link_bc_rcv_gap(l))
1627 rc |= TIPC_LINK_SND_STATE;
1628
1629 /* Return now if sender supports nack via STATE messages */
1630 if (l->peer_caps & TIPC_BCAST_STATE_NACK)
1631 return rc;
1632
1633 /* Otherwise, be backwards compatible */
Jon Paul Maloy52666982015-10-22 08:51:41 -04001634
1635 if (!more(peers_snd_nxt, l->rcv_nxt)) {
1636 l->nack_state = BC_NACK_SND_CONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001637 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001638 }
1639
1640 /* Don't NACK if one was recently sent or peeked */
1641 if (l->nack_state == BC_NACK_SND_SUPPRESS) {
1642 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001643 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001644 }
1645
1646 /* Conditionally delay NACK sending until next synch rcv */
1647 if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
1648 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1649 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001650 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001651 }
1652
1653 /* Send NACK now but suppress next one */
1654 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
1655 l->nack_state = BC_NACK_SND_SUPPRESS;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001656 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001657}
1658
1659void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1660 struct sk_buff_head *xmitq)
1661{
1662 struct sk_buff *skb, *tmp;
1663 struct tipc_link *snd_l = l->bc_sndlink;
1664
1665 if (!link_is_up(l) || !l->bc_peer_is_up)
1666 return;
1667
1668 if (!more(acked, l->acked))
1669 return;
1670
1671 /* Skip over packets peer has already acked */
1672 skb_queue_walk(&snd_l->transmq, skb) {
1673 if (more(buf_seqno(skb), l->acked))
1674 break;
1675 }
1676
1677 /* Update/release the packets peer is acking now */
1678 skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
1679 if (more(buf_seqno(skb), acked))
1680 break;
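		/* Free the buffer once no more peers remain to ack it */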
1681 if (!--TIPC_SKB_CB(skb)->ackers) {
1682 __skb_unlink(skb, &snd_l->transmq);
1683 kfree_skb(skb);
1684 }
1685 }
1686 l->acked = acked;
1687 tipc_link_advance_backlog(snd_l, xmitq);
1688 if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
1689 link_prepare_wakeup(snd_l);
1690}
1691
1692/* tipc_link_bc_nack_rcv(): receive broadcast nack message
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001693 * This function is kept for backwards compatibility, since
1694 * BCAST_PROTOCOL/STATE messages are no longer used from TIPC v2.5 onwards.
Jon Paul Maloy52666982015-10-22 08:51:41 -04001695 */
1696int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1697 struct sk_buff_head *xmitq)
1698{
1699 struct tipc_msg *hdr = buf_msg(skb);
1700 u32 dnode = msg_destnode(hdr);
1701 int mtyp = msg_type(hdr);
1702 u16 acked = msg_bcast_ack(hdr);
1703 u16 from = acked + 1;
1704 u16 to = msg_bcgap_to(hdr);
1705 u16 peers_snd_nxt = to + 1;
1706 int rc = 0;
1707
1708 kfree_skb(skb);
1709
1710 if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1711 return 0;
1712
1713 if (mtyp != STATE_MSG)
1714 return 0;
1715
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001716 if (dnode == tipc_own_addr(l->net)) {
Jon Paul Maloy52666982015-10-22 08:51:41 -04001717 tipc_link_bc_ack_rcv(l, acked, xmitq);
1718 rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
1719 l->stats.recv_nacks++;
1720 return rc;
1721 }
1722
1723 /* Msg for other node => suppress own NACK at next sync if applicable */
1724 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1725 l->nack_state = BC_NACK_SND_SUPPRESS;
1726
1727 return 0;
1728}
1729
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04001730void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001731{
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001732 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001733
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04001734 l->window = win;
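	/* Backlog queue limits scale with both window size and importance */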
Jon Paul Maloy5a0950c2016-08-16 11:53:51 -04001735 l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win);
1736 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2);
1737 l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3);
1738 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04001739 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001740}
1741
Allan Stephens5c216e12011-10-18 11:34:29 -04001742/**
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001743 * tipc_link_reset_stats - reset link statistics
Jon Paul Maloy1a906322015-11-19 14:30:47 -05001744 * @l: pointer to link
Per Lidenb97bf3f2006-01-02 19:04:38 +01001745 */
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001746void tipc_link_reset_stats(struct tipc_link *l)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001747{
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001748 memset(&l->stats, 0, sizeof(l->stats));
1749 if (!link_is_bc_sndlink(l)) {
1750 l->stats.sent_info = l->snd_nxt;
1751 l->stats.recv_info = l->rcv_nxt;
1752 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001753}
1754
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001755static void link_print(struct tipc_link *l, const char *str)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001756{
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001757 struct sk_buff *hskb = skb_peek(&l->transmq);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001758 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001759 u16 tail = l->snd_nxt - 1;
Ying Xue7a2f7d12014-04-21 10:55:46 +08001760
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001761 pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001762 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1763 skb_queue_len(&l->transmq), head, tail,
1764 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001765}
Richard Alpe0655f6a2014-11-20 10:29:07 +01001766
1767/* Parse and validate nested (link) properties valid for media, bearer and link
1768 */
1769int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1770{
1771 int err;
1772
1773 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1774 tipc_nl_prop_policy);
1775 if (err)
1776 return err;
1777
1778 if (props[TIPC_NLA_PROP_PRIO]) {
1779 u32 prio;
1780
1781 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1782 if (prio > TIPC_MAX_LINK_PRI)
1783 return -EINVAL;
1784 }
1785
1786 if (props[TIPC_NLA_PROP_TOL]) {
1787 u32 tol;
1788
1789 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1790 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1791 return -EINVAL;
1792 }
1793
1794 if (props[TIPC_NLA_PROP_WIN]) {
1795 u32 win;
1796
1797 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1798 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1799 return -EINVAL;
1800 }
1801
1802 return 0;
1803}
Richard Alpe7be57fc2014-11-20 10:29:12 +01001804
Richard Alped8182802014-11-24 11:10:29 +01001805static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001806{
1807 int i;
1808 struct nlattr *stats;
1809
1810 struct nla_map {
1811 u32 key;
1812 u32 val;
1813 };
1814
1815 struct nla_map map[] = {
1816 {TIPC_NLA_STATS_RX_INFO, s->recv_info},
1817 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1818 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1819 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1820 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1821 {TIPC_NLA_STATS_TX_INFO, s->sent_info},
1822 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1823 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1824 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1825 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1826 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1827 s->msg_length_counts : 1},
1828 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1829 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1830 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1831 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1832 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1833 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1834 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1835 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1836 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1837 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
1838 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1839 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1840 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1841 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
1842 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1843 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1844 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1845 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1846 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1847 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1848 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1849 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1850 (s->accu_queue_sz / s->queue_sz_counts) : 0}
1851 };
1852
1853 stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1854 if (!stats)
1855 return -EMSGSIZE;
1856
1857 for (i = 0; i < ARRAY_SIZE(map); i++)
1858 if (nla_put_u32(skb, map[i].key, map[i].val))
1859 goto msg_full;
1860
1861 nla_nest_end(skb, stats);
1862
1863 return 0;
1864msg_full:
1865 nla_nest_cancel(skb, stats);
1866
1867 return -EMSGSIZE;
1868}
1869
1870/* Caller should hold appropriate locks to protect the link */
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001871int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1872 struct tipc_link *link, int nlflags)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001873{
1874 int err;
1875 void *hdr;
1876 struct nlattr *attrs;
1877 struct nlattr *prop;
Ying Xue34747532015-01-09 15:27:10 +08001878 struct tipc_net *tn = net_generic(net, tipc_net_id);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001879
Richard Alpebfb3e5d2015-02-09 09:50:03 +01001880 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02001881 nlflags, TIPC_NL_LINK_GET);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001882 if (!hdr)
1883 return -EMSGSIZE;
1884
1885 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1886 if (!attrs)
1887 goto msg_full;
1888
1889 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1890 goto attr_msg_full;
1891 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
Ying Xue34747532015-01-09 15:27:10 +08001892 tipc_cluster_mask(tn->own_addr)))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001893 goto attr_msg_full;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001894 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001895 goto attr_msg_full;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001896 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001897 goto attr_msg_full;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001898 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001899 goto attr_msg_full;
1900
1901 if (tipc_link_is_up(link))
1902 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1903 goto attr_msg_full;
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04001904 if (link->active)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001905 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1906 goto attr_msg_full;
1907
1908 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1909 if (!prop)
1910 goto attr_msg_full;
1911 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1912 goto prop_msg_full;
1913 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1914 goto prop_msg_full;
1915 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04001916 link->window))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001917 goto prop_msg_full;
1918 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1919 goto prop_msg_full;
1920 nla_nest_end(msg->skb, prop);
1921
1922 err = __tipc_nl_add_stats(msg->skb, &link->stats);
1923 if (err)
1924 goto attr_msg_full;
1925
1926 nla_nest_end(msg->skb, attrs);
1927 genlmsg_end(msg->skb, hdr);
1928
1929 return 0;
1930
1931prop_msg_full:
1932 nla_nest_cancel(msg->skb, prop);
1933attr_msg_full:
1934 nla_nest_cancel(msg->skb, attrs);
1935msg_full:
1936 genlmsg_cancel(msg->skb, hdr);
1937
1938 return -EMSGSIZE;
1939}
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001940
1941static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
1942 struct tipc_stats *stats)
1943{
1944 int i;
1945 struct nlattr *nest;
1946
1947 struct nla_map {
1948 __u32 key;
1949 __u32 val;
1950 };
1951
1952 struct nla_map map[] = {
1953 {TIPC_NLA_STATS_RX_INFO, stats->recv_info},
1954 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
1955 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
1956 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
1957 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
1958 {TIPC_NLA_STATS_TX_INFO, stats->sent_info},
1959 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
1960 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
1961 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
1962 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
1963 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
1964 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
1965 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
1966 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
1967 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
1968 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
1969 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
1970 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
1971 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
1972 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
1973 };
1974
1975 nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1976 if (!nest)
1977 return -EMSGSIZE;
1978
1979 for (i = 0; i < ARRAY_SIZE(map); i++)
1980 if (nla_put_u32(skb, map[i].key, map[i].val))
1981 goto msg_full;
1982
1983 nla_nest_end(skb, nest);
1984
1985 return 0;
1986msg_full:
1987 nla_nest_cancel(skb, nest);
1988
1989 return -EMSGSIZE;
1990}
1991
1992int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
1993{
1994 int err;
1995 void *hdr;
1996 struct nlattr *attrs;
1997 struct nlattr *prop;
1998 struct tipc_net *tn = net_generic(net, tipc_net_id);
1999 struct tipc_link *bcl = tn->bcl;
2000
2001 if (!bcl)
2002 return 0;
2003
2004 tipc_bcast_lock(net);
2005
2006 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2007 NLM_F_MULTI, TIPC_NL_LINK_GET);
Insu Yunb53ce3e2016-02-17 11:47:35 -05002008 if (!hdr) {
2009 tipc_bcast_unlock(net);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002010 return -EMSGSIZE;
Insu Yunb53ce3e2016-02-17 11:47:35 -05002011 }
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002012
2013 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2014 if (!attrs)
2015 goto msg_full;
2016
2017 /* The broadcast link is always up */
2018 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2019 goto attr_msg_full;
2020
2021 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2022 goto attr_msg_full;
2023 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2024 goto attr_msg_full;
2025 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
2026 goto attr_msg_full;
2027 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
2028 goto attr_msg_full;
2029
2030 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2031 if (!prop)
2032 goto attr_msg_full;
2033 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2034 goto prop_msg_full;
2035 nla_nest_end(msg->skb, prop);
2036
2037 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2038 if (err)
2039 goto attr_msg_full;
2040
2041 tipc_bcast_unlock(net);
2042 nla_nest_end(msg->skb, attrs);
2043 genlmsg_end(msg->skb, hdr);
2044
2045 return 0;
2046
2047prop_msg_full:
2048 nla_nest_cancel(msg->skb, prop);
2049attr_msg_full:
2050 nla_nest_cancel(msg->skb, attrs);
2051msg_full:
2052 tipc_bcast_unlock(net);
2053 genlmsg_cancel(msg->skb, hdr);
2054
2055 return -EMSGSIZE;
2056}
2057
Richard Alped01332f2016-02-01 08:19:56 +01002058void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2059 struct sk_buff_head *xmitq)
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002060{
2061 l->tolerance = tol;
Richard Alped01332f2016-02-01 08:19:56 +01002062 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002063}
2064
Richard Alped01332f2016-02-01 08:19:56 +01002065void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2066 struct sk_buff_head *xmitq)
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002067{
2068 l->priority = prio;
Richard Alped01332f2016-02-01 08:19:56 +01002069 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002070}
2071
2072void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2073{
2074 l->abort_limit = limit;
2075}