/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
#include "fragmentation.h"
#include "multicast.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;
		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}

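/* Example (illustrative sketch, not taken from the original code): a caller
 * that looks up or creates a per-originator VLAN object is expected to drop
 * its reference once done, e.g.
 *
 *	struct batadv_orig_node_vlan *vlan;
 *
 *	vlan = batadv_orig_node_vlan_new(orig_node, vid);
 *	if (!vlan)
 *		return -ENOMEM;
 *	...
 *	batadv_orig_node_vlan_free_ref(vlan);
 *
 * The refcount of a freshly created object starts at 2: one reference is
 * owned by orig_node->vlan_list, the other one is handed to the caller.
 */
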
int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

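/* Lifecycle sketch (illustrative, not taken from the original code): the
 * originator hash and the periodic purge work set up here are torn down
 * again by batadv_originator_free() further below, typically together with
 * the soft interface:
 *
 *	ret = batadv_originator_init(bat_priv);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	batadv_originator_free(bat_priv);
 */
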
/**
 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
 *  free after rcu grace period
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);

	kfree_rcu(neigh_ifinfo, rcu);
}

/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_release(neigh_ifinfo);
}

/**
 * batadv_neigh_node_release - release neigh_node from lists and queue for
 *  free after rcu grace period
 * @neigh_node: the neighbor object to free
 */
static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}
	batadv_hardif_free_ref(neigh_node->if_incoming);

	kfree_rcu(neigh_node, rcu);
}

/**
 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
 *  and possibly release it
 * @neigh_node: the neighbor object to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		batadv_neigh_node_release(neigh_node);
}

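/* Reference counting sketch (illustrative): the objects in this file all
 * follow the same pattern - a reader takes a reference under rcu_read_lock()
 * with atomic_inc_not_zero() and drops it again through the matching
 * *_free_ref() helper, which queues the object for freeing after an RCU
 * grace period once the last reference is gone:
 *
 *	rcu_read_lock();
 *	router = rcu_dereference(orig_ifinfo->router);
 *	if (router && !atomic_inc_not_zero(&router->refcount))
 *		router = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (router)
 *		batadv_neigh_node_free_ref(router);
 */
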
/**
 * batadv_orig_router_get - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be the router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

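/* Example (illustrative sketch, not taken from the original code): a caller
 * that needs the next hop for a payload frame typically does
 *
 *	router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
 *	if (!router)
 *		goto out;
 *	... transmit via router->if_incoming towards router->addr ...
 *	batadv_neigh_node_free_ref(router);
 */
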
/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	atomic_set(&orig_ifinfo->refcount, 2);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}

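/* Example (illustrative sketch): the per-interface routing metric of a
 * neighbor is read through such an ifinfo object, roughly the way the
 * B.A.T.M.A.N. IV implementation does it:
 *
 *	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
 *	if (neigh_ifinfo) {
 *		tq_avg = neigh_ifinfo->bat_iv.tq_avg;
 *		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
 *	}
 */
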
/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 * @orig_node: originator object representing the neighbour
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr,
		      struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

out:
	return neigh_node;
}

/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 * Returns NULL if the neighbour is not found.
 */
struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const uint8_t *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

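/* Example (illustrative sketch, not taken from the original code): OGM
 * processing first looks an existing neighbor up and only allocates a new one
 * when it is not yet known; the protocol-specific code then links the new
 * object into orig_node->neigh_list itself, roughly:
 *
 *	neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
 *					   ethhdr->h_source);
 *	if (!neigh_node)
 *		neigh_node = batadv_neigh_node_new(hard_iface,
 *						   ethhdr->h_source, orig_node);
 *	if (!neigh_node)
 *		goto out;
 *	...
 *	batadv_neigh_node_free_ref(neigh_node);
 */
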
/**
 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
 *  free after rcu grace period
 * @orig_ifinfo: the orig_ifinfo object to release
 */
static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
{
	struct batadv_neigh_node *router;

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_free_ref(router);

	kfree_rcu(orig_ifinfo, rcu);
}

/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		batadv_orig_ifinfo_release(orig_ifinfo);
}

/**
 * batadv_orig_node_free_rcu - free the orig_node
 * @rcu: rcu pointer of the orig_node
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	batadv_mcast_purge_orig(orig_node);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_release - release orig_node from lists and queue for
 *  free after rcu grace period
 * @orig_node: the orig node to free
 */
static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  release it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_release(orig_node);
}

void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
	spin_lock_init(&orig_node->mcast_handler_lock);
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;

free_orig_node:
	kfree(orig_node);
	return NULL;
}

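/* Example (illustrative sketch, not taken from the original code):
 * batadv_orig_node_new() does not hash the new node; the routing algorithm
 * implementation is expected to insert it into bat_priv->orig_hash itself,
 * roughly the way B.A.T.M.A.N. IV does:
 *
 *	orig_node = batadv_orig_node_new(bat_priv, addr);
 *	if (!orig_node)
 *		return NULL;
 *	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
 *				     batadv_choose_orig, orig_node,
 *				     &orig_node->hash_entry);
 *	if (hash_added != 0)
 *		batadv_orig_node_free_ref(orig_node);
 */
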
/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: the neighbor whose ifinfo entries are to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* only necessary if not the whole neighbor is to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_free_ref(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}

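/* Note (added for clarity): bao->bat_neigh_cmp() is the routing algorithm's
 * metric comparison; it returns a value greater than zero when the metric via
 * the first neighbor is better than the metric via the second one for the
 * given outgoing interfaces, so the loop above keeps the best neighbor seen
 * so far while holding a reference on it.
 */
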
/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for NULL ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_free_ref(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_free_ref(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}

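/* Note (added for clarity): the purge runs periodically from the
 * bat_priv->orig_work delayed work (see batadv_purge_orig() below). An
 * originator is removed entirely only after it has not been seen for
 * 2 * BATADV_PURGE_TIMEOUT; otherwise only its stale neighbors, ifinfo
 * entries and routes are cleaned up and the best remaining neighbor is
 * re-elected per outgoing interface.
 */
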
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_tt_global_del_orig(orig_node->bat_priv,
							  orig_node, -1,
							  "originator timed out");
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

/**
 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_free_ref(hard_iface);
	return 0;
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}