From c476efbcde5ba58b81ac752f4a894d6db8e17d94 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 11 May 2010 14:40:48 +0200 Subject: ipv6: ip6mr: move unres_queue and timer to per-namespace data The unres_queue is currently shared between all namespaces. Following patches will additionally allow to create multiple multicast routing tables in each namespace. Having a single shared queue for all these users seems to excessive, move the queue and the cleanup timer to the per-namespace data to unshare it. As a side-effect, this fixes a bug in the seq file iteration functions: the first entry returned is always from the current namespace, entries returned after that may belong to any namespace. Signed-off-by: Patrick McHardy --- net/ipv6/ip6mr.c | 74 +++++++++++++++++++++++++------------------------------- 1 file changed, 33 insertions(+), 41 deletions(-) (limited to 'net/ipv6') diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index e0b530ca394..7236030e403 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -63,8 +63,6 @@ static DEFINE_RWLOCK(mrt_lock); #define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL) -static struct mfc6_cache *mfc_unres_queue; /* Queue of unresolved entries */ - /* Special spinlock for queue of unresolved entries */ static DEFINE_SPINLOCK(mfc_unres_lock); @@ -84,8 +82,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm); static void mroute_clean_tables(struct net *net); -static struct timer_list ipmr_expire_timer; - #ifdef CONFIG_PROC_FS @@ -110,11 +106,10 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, return mfc; read_unlock(&mrt_lock); - it->cache = &mfc_unres_queue; + it->cache = &net->ipv6.mfc6_unres_queue; spin_lock_bh(&mfc_unres_lock); - for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) - if (net_eq(mfc6_net(mfc), net) && - pos-- == 0) + for (mfc = net->ipv6.mfc6_unres_queue; mfc; mfc = mfc->next) + if (pos-- == 0) return mfc; spin_unlock_bh(&mfc_unres_lock); @@ -244,7 +239,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (mfc->next) return mfc->next; - if (it->cache == &mfc_unres_queue) + if (it->cache == &net->ipv6.mfc6_unres_queue) goto end_of_list; BUG_ON(it->cache != net->ipv6.mfc6_cache_array); @@ -257,11 +252,11 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) /* exhausted cache_array, show unresolved */ read_unlock(&mrt_lock); - it->cache = &mfc_unres_queue; + it->cache = &net->ipv6.mfc6_unres_queue; it->ct = 0; spin_lock_bh(&mfc_unres_lock); - mfc = mfc_unres_queue; + mfc = net->ipv6.mfc6_unres_queue; if (mfc) return mfc; @@ -277,7 +272,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); - if (it->cache == &mfc_unres_queue) + if (it->cache == &net->ipv6.mfc6_unres_queue) spin_unlock_bh(&mfc_unres_lock); else if (it->cache == net->ipv6.mfc6_cache_array) read_unlock(&mrt_lock); @@ -301,7 +296,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, mfc->mf6c_parent); - if (it->cache != &mfc_unres_queue) { + if (it->cache != &net->ipv6.mfc6_unres_queue) { seq_printf(seq, " %8lu %8lu %8lu", mfc->mfc_un.res.pkt, mfc->mfc_un.res.bytes, @@ -559,15 +554,15 @@ static void ip6mr_destroy_unres(struct mfc6_cache *c) } -/* Single timer process for all the unresolved queue. 
*/ +/* Timer process for all the unresolved queue. */ -static void ipmr_do_expire_process(unsigned long dummy) +static void ipmr_do_expire_process(struct net *net) { unsigned long now = jiffies; unsigned long expires = 10 * HZ; struct mfc6_cache *c, **cp; - cp = &mfc_unres_queue; + cp = &net->ipv6.mfc6_unres_queue; while ((c = *cp) != NULL) { if (time_after(c->mfc_un.unres.expires, now)) { @@ -583,19 +578,21 @@ static void ipmr_do_expire_process(unsigned long dummy) ip6mr_destroy_unres(c); } - if (mfc_unres_queue != NULL) - mod_timer(&ipmr_expire_timer, jiffies + expires); + if (net->ipv6.mfc6_unres_queue != NULL) + mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + expires); } -static void ipmr_expire_process(unsigned long dummy) +static void ipmr_expire_process(unsigned long arg) { + struct net *net = (struct net *)arg; + if (!spin_trylock(&mfc_unres_lock)) { - mod_timer(&ipmr_expire_timer, jiffies + 1); + mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + 1); return; } - if (mfc_unres_queue != NULL) - ipmr_do_expire_process(dummy); + if (net->ipv6.mfc6_unres_queue != NULL) + ipmr_do_expire_process(net); spin_unlock(&mfc_unres_lock); } @@ -880,9 +877,8 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) struct mfc6_cache *c; spin_lock_bh(&mfc_unres_lock); - for (c = mfc_unres_queue; c; c = c->next) { - if (net_eq(mfc6_net(c), net) && - ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && + for (c = net->ipv6.mfc6_unres_queue; c; c = c->next) { + if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) break; } @@ -923,10 +919,10 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) } atomic_inc(&net->ipv6.cache_resolve_queue_len); - c->next = mfc_unres_queue; - mfc_unres_queue = c; + c->next = net->ipv6.mfc6_unres_queue; + net->ipv6.mfc6_unres_queue = c; - ipmr_do_expire_process(1); + ipmr_do_expire_process(net); } /* @@ -1019,6 +1015,9 @@ static int __net_init ip6mr_net_init(struct net *net) goto fail_mfc6_cache; } + setup_timer(&net->ipv6.ipmr_expire_timer, ipmr_expire_process, + (unsigned long)net); + #ifdef CONFIG_IPV6_PIMSM_V2 net->ipv6.mroute_reg_vif_num = -1; #endif @@ -1050,6 +1049,7 @@ static void __net_exit ip6mr_net_exit(struct net *net) proc_net_remove(net, "ip6_mr_cache"); proc_net_remove(net, "ip6_mr_vif"); #endif + del_timer(&net->ipv6.ipmr_expire_timer); mroute_clean_tables(net); kfree(net->ipv6.mfc6_cache_array); kfree(net->ipv6.vif6_table); @@ -1075,7 +1075,6 @@ int __init ip6_mr_init(void) if (err) goto reg_pernet_fail; - setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); err = register_netdevice_notifier(&ip6_mr_notifier); if (err) goto reg_notif_fail; @@ -1092,7 +1091,6 @@ add_proto_fail: unregister_netdevice_notifier(&ip6_mr_notifier); #endif reg_notif_fail: - del_timer(&ipmr_expire_timer); unregister_pernet_subsys(&ip6mr_net_ops); reg_pernet_fail: kmem_cache_destroy(mrt_cachep); @@ -1102,7 +1100,6 @@ reg_pernet_fail: void ip6_mr_cleanup(void) { unregister_netdevice_notifier(&ip6_mr_notifier); - del_timer(&ipmr_expire_timer); unregister_pernet_subsys(&ip6mr_net_ops); kmem_cache_destroy(mrt_cachep); } @@ -1167,18 +1164,17 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) * need to send on the frames and tidy up. 
*/ spin_lock_bh(&mfc_unres_lock); - for (cp = &mfc_unres_queue; (uc = *cp) != NULL; + for (cp = &net->ipv6.mfc6_unres_queue; (uc = *cp) != NULL; cp = &uc->next) { - if (net_eq(mfc6_net(uc), net) && - ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && + if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { *cp = uc->next; atomic_dec(&net->ipv6.cache_resolve_queue_len); break; } } - if (mfc_unres_queue == NULL) - del_timer(&ipmr_expire_timer); + if (net->ipv6.mfc6_unres_queue == NULL) + del_timer(&net->ipv6.ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); if (uc) { @@ -1230,12 +1226,8 @@ static void mroute_clean_tables(struct net *net) struct mfc6_cache *c, **cp; spin_lock_bh(&mfc_unres_lock); - cp = &mfc_unres_queue; + cp = &net->ipv6.mfc6_unres_queue; while ((c = *cp) != NULL) { - if (!net_eq(mfc6_net(c), net)) { - cp = &c->next; - continue; - } *cp = c->next; ip6mr_destroy_unres(c); } -- cgit v1.2.3 From b5aa30b19121de49021fba57aa1f6e4c787fcf67 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 11 May 2010 14:40:50 +0200 Subject: ipv6: ip6mr: remove net pointer from struct mfc6_cache Now that cache entries in unres_queue don't need to be distinguished by their network namespace pointer anymore, we can remove it from struct mfc6_cache add pass the namespace as function argument to the functions that need it. Signed-off-by: Patrick McHardy --- net/ipv6/ip6mr.c | 63 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 31 insertions(+), 32 deletions(-) (limited to 'net/ipv6') diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 7236030e403..b3783a436bb 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -76,10 +76,12 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; -static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache); +static int ip6_mr_forward(struct net *net, struct sk_buff *skb, + struct mfc6_cache *cache); static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, int assert); -static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm); +static int ip6mr_fill_mroute(struct net *net, struct sk_buff *skb, + struct mfc6_cache *c, struct rtmsg *rtm); static void mroute_clean_tables(struct net *net); @@ -523,7 +525,6 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head) static inline void ip6mr_cache_free(struct mfc6_cache *c) { - release_net(mfc6_net(c)); kmem_cache_free(mrt_cachep, c); } @@ -531,10 +532,9 @@ static inline void ip6mr_cache_free(struct mfc6_cache *c) and reporting error to netlink readers. */ -static void ip6mr_destroy_unres(struct mfc6_cache *c) +static void ip6mr_destroy_unres(struct net *net, struct mfc6_cache *c) { struct sk_buff *skb; - struct net *net = mfc6_net(c); atomic_dec(&net->ipv6.cache_resolve_queue_len); @@ -575,7 +575,7 @@ static void ipmr_do_expire_process(struct net *net) } *cp = c->next; - ip6mr_destroy_unres(c); + ip6mr_destroy_unres(net, c); } if (net->ipv6.mfc6_unres_queue != NULL) @@ -599,10 +599,10 @@ static void ipmr_expire_process(unsigned long arg) /* Fill oifs list. It is called under write locked mrt_lock. 
*/ -static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls) +static void ip6mr_update_thresholds(struct net *net, struct mfc6_cache *cache, + unsigned char *ttls) { int vifi; - struct net *net = mfc6_net(cache); cache->mfc_un.res.minvif = MAXMIFS; cache->mfc_un.res.maxvif = 0; @@ -717,24 +717,22 @@ static struct mfc6_cache *ip6mr_cache_find(struct net *net, /* * Allocate a multicast cache entry */ -static struct mfc6_cache *ip6mr_cache_alloc(struct net *net) +static struct mfc6_cache *ip6mr_cache_alloc(void) { struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); if (c == NULL) return NULL; c->mfc_un.res.minvif = MAXMIFS; - mfc6_net_set(c, net); return c; } -static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net) +static struct mfc6_cache *ip6mr_cache_alloc_unres(void) { struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); if (c == NULL) return NULL; skb_queue_head_init(&c->mfc_un.unres.unresolved); c->mfc_un.unres.expires = jiffies + 10 * HZ; - mfc6_net_set(c, net); return c; } @@ -742,7 +740,8 @@ static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net) * A cache entry has gone into a resolved state from queued */ -static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) +static void ip6mr_cache_resolve(struct net *net, struct mfc6_cache *uc, + struct mfc6_cache *c) { struct sk_buff *skb; @@ -755,7 +754,7 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) int err; struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); - if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { + if (ip6mr_fill_mroute(net, skb, c, NLMSG_DATA(nlh)) > 0) { nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; } else { nlh->nlmsg_type = NLMSG_ERROR; @@ -763,9 +762,9 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) skb_trim(skb, nlh->nlmsg_len); ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE; } - err = rtnl_unicast(skb, mfc6_net(uc), NETLINK_CB(skb).pid); + err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid); } else - ip6_mr_forward(skb, c); + ip6_mr_forward(net, skb, c); } } @@ -889,7 +888,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) */ if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 || - (c = ip6mr_cache_alloc_unres(net)) == NULL) { + (c = ip6mr_cache_alloc_unres()) == NULL) { spin_unlock_bh(&mfc_unres_lock); kfree_skb(skb); @@ -1133,7 +1132,7 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) if (c != NULL) { write_lock_bh(&mrt_lock); c->mf6c_parent = mfc->mf6cc_parent; - ip6mr_update_thresholds(c, ttls); + ip6mr_update_thresholds(net, c, ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_unlock_bh(&mrt_lock); @@ -1143,14 +1142,14 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr)) return -EINVAL; - c = ip6mr_cache_alloc(net); + c = ip6mr_cache_alloc(); if (c == NULL) return -ENOMEM; c->mf6c_origin = mfc->mf6cc_origin.sin6_addr; c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr; c->mf6c_parent = mfc->mf6cc_parent; - ip6mr_update_thresholds(c, ttls); + ip6mr_update_thresholds(net, c, ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; @@ -1178,7 +1177,7 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) spin_unlock_bh(&mfc_unres_lock); if (uc) { - ip6mr_cache_resolve(uc, c); + ip6mr_cache_resolve(net, uc, c); ip6mr_cache_free(uc); } return 0; @@ 
-1229,7 +1228,7 @@ static void mroute_clean_tables(struct net *net) cp = &net->ipv6.mfc6_unres_queue; while ((c = *cp) != NULL) { *cp = c->next; - ip6mr_destroy_unres(c); + ip6mr_destroy_unres(net, c); } spin_unlock_bh(&mfc_unres_lock); } @@ -1497,10 +1496,10 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb) * Processing handlers for ip6mr_forward */ -static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi) +static int ip6mr_forward2(struct net *net, struct sk_buff *skb, + struct mfc6_cache *c, int vifi) { struct ipv6hdr *ipv6h; - struct net *net = mfc6_net(c); struct mif_device *vif = &net->ipv6.vif6_table[vifi]; struct net_device *dev; struct dst_entry *dst; @@ -1581,11 +1580,11 @@ static int ip6mr_find_vif(struct net_device *dev) return ct; } -static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) +static int ip6_mr_forward(struct net *net, struct sk_buff *skb, + struct mfc6_cache *cache) { int psend = -1; int vif, ct; - struct net *net = mfc6_net(cache); vif = cache->mf6c_parent; cache->mfc_un.res.pkt++; @@ -1627,13 +1626,13 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) if (psend != -1) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) - ip6mr_forward2(skb2, cache, psend); + ip6mr_forward2(net, skb2, cache, psend); } psend = ct; } } if (psend != -1) { - ip6mr_forward2(skb, cache, psend); + ip6mr_forward2(net, skb, cache, psend); return 0; } @@ -1674,7 +1673,7 @@ int ip6_mr_input(struct sk_buff *skb) return -ENODEV; } - ip6_mr_forward(skb, cache); + ip6_mr_forward(net, skb, cache); read_unlock(&mrt_lock); @@ -1683,11 +1682,11 @@ int ip6_mr_input(struct sk_buff *skb) static int -ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm) +ip6mr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc6_cache *c, + struct rtmsg *rtm) { int ct; struct rtnexthop *nhp; - struct net *net = mfc6_net(c); u8 *b = skb_tail_pointer(skb); struct rtattr *mp_head; @@ -1781,7 +1780,7 @@ int ip6mr_get_route(struct net *net, if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) cache->mfc_flags |= MFC_NOTIFY; - err = ip6mr_fill_mroute(skb, cache, rtm); + err = ip6mr_fill_mroute(net, skb, cache, rtm); read_unlock(&mrt_lock); return err; } -- cgit v1.2.3 From f30a77842129b5656360cc1f5db48a3fcfb64528 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 11 May 2010 14:40:51 +0200 Subject: ipv6: ip6mr: convert struct mfc_cache to struct list_head Signed-off-by: Patrick McHardy --- net/ipv6/ip6mr.c | 127 +++++++++++++++++++++++++++---------------------------- 1 file changed, 62 insertions(+), 65 deletions(-) (limited to 'net/ipv6') diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index b3783a436bb..08e09042ad1 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -89,7 +89,7 @@ static void mroute_clean_tables(struct net *net); struct ipmr_mfc_iter { struct seq_net_private p; - struct mfc6_cache **cache; + struct list_head *cache; int ct; }; @@ -99,18 +99,18 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, { struct mfc6_cache *mfc; - it->cache = net->ipv6.mfc6_cache_array; read_lock(&mrt_lock); - for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) - for (mfc = net->ipv6.mfc6_cache_array[it->ct]; - mfc; mfc = mfc->next) + for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) { + it->cache = &net->ipv6.mfc6_cache_array[it->ct]; + list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; + } read_unlock(&mrt_lock); - it->cache = &net->ipv6.mfc6_unres_queue; 
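/*
 * Illustrative aside (not part of the patch): the seq_file iterator in the
 * hunks around here keeps it->cache pointing at the list head it is walking.
 * Because a list_head chain is circular, "end of this chain" is simply the
 * next pointer arriving back at that head.  Identifiers match the patch;
 * the helper itself is hypothetical.
 */
static struct mfc6_cache *sketch_mfc_next(struct ipmr_mfc_iter *it,
					  struct mfc6_cache *mfc)
{
	if (mfc->list.next != it->cache)	/* not back at the head yet */
		return list_entry(mfc->list.next, struct mfc6_cache, list);
	return NULL;				/* chain exhausted, caller moves to the next one */
}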
spin_lock_bh(&mfc_unres_lock); - for (mfc = net->ipv6.mfc6_unres_queue; mfc; mfc = mfc->next) + it->cache = &net->ipv6.mfc6_unres_queue; + list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; spin_unlock_bh(&mfc_unres_lock); @@ -119,9 +119,6 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, return NULL; } - - - /* * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif */ @@ -238,18 +235,19 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (v == SEQ_START_TOKEN) return ipmr_mfc_seq_idx(net, seq->private, 0); - if (mfc->next) - return mfc->next; + if (mfc->list.next != it->cache) + return list_entry(mfc->list.next, struct mfc6_cache, list); if (it->cache == &net->ipv6.mfc6_unres_queue) goto end_of_list; - BUG_ON(it->cache != net->ipv6.mfc6_cache_array); + BUG_ON(it->cache != &net->ipv6.mfc6_cache_array[it->ct]); while (++it->ct < MFC6_LINES) { - mfc = net->ipv6.mfc6_cache_array[it->ct]; - if (mfc) - return mfc; + it->cache = &net->ipv6.mfc6_cache_array[it->ct]; + if (list_empty(it->cache)) + continue; + return list_first_entry(it->cache, struct mfc6_cache, list); } /* exhausted cache_array, show unresolved */ @@ -258,9 +256,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) it->ct = 0; spin_lock_bh(&mfc_unres_lock); - mfc = net->ipv6.mfc6_unres_queue; - if (mfc) - return mfc; + if (!list_empty(it->cache)) + return list_first_entry(it->cache, struct mfc6_cache, list); end_of_list: spin_unlock_bh(&mfc_unres_lock); @@ -560,25 +557,22 @@ static void ipmr_do_expire_process(struct net *net) { unsigned long now = jiffies; unsigned long expires = 10 * HZ; - struct mfc6_cache *c, **cp; - - cp = &net->ipv6.mfc6_unres_queue; + struct mfc6_cache *c, *next; - while ((c = *cp) != NULL) { + list_for_each_entry_safe(c, next, &net->ipv6.mfc6_unres_queue, list) { if (time_after(c->mfc_un.unres.expires, now)) { /* not yet... 
*/ unsigned long interval = c->mfc_un.unres.expires - now; if (interval < expires) expires = interval; - cp = &c->next; continue; } - *cp = c->next; + list_del(&c->list); ip6mr_destroy_unres(net, c); } - if (net->ipv6.mfc6_unres_queue != NULL) + if (!list_empty(&net->ipv6.mfc6_unres_queue)) mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + expires); } @@ -591,7 +585,7 @@ static void ipmr_expire_process(unsigned long arg) return; } - if (net->ipv6.mfc6_unres_queue != NULL) + if (!list_empty(&net->ipv6.mfc6_unres_queue)) ipmr_do_expire_process(net); spin_unlock(&mfc_unres_lock); @@ -706,12 +700,12 @@ static struct mfc6_cache *ip6mr_cache_find(struct net *net, int line = MFC6_HASH(mcastgrp, origin); struct mfc6_cache *c; - for (c = net->ipv6.mfc6_cache_array[line]; c; c = c->next) { + list_for_each_entry(c, &net->ipv6.mfc6_cache_array[line], list) { if (ipv6_addr_equal(&c->mf6c_origin, origin) && ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) - break; + return c; } - return c; + return NULL; } /* @@ -872,17 +866,20 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, static int ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) { + bool found = false; int err; struct mfc6_cache *c; spin_lock_bh(&mfc_unres_lock); - for (c = net->ipv6.mfc6_unres_queue; c; c = c->next) { + list_for_each_entry(c, &net->ipv6.mfc6_unres_queue, list) { if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && - ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) + ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) { + found = true; break; + } } - if (c == NULL) { + if (!found) { /* * Create a new entry if allowable */ @@ -918,8 +915,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) } atomic_inc(&net->ipv6.cache_resolve_queue_len); - c->next = net->ipv6.mfc6_unres_queue; - net->ipv6.mfc6_unres_queue = c; + list_add(&c->list, &net->ipv6.mfc6_unres_queue); ipmr_do_expire_process(net); } @@ -946,16 +942,15 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc) { int line; - struct mfc6_cache *c, **cp; + struct mfc6_cache *c, *next; line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); - for (cp = &net->ipv6.mfc6_cache_array[line]; - (c = *cp) != NULL; cp = &c->next) { + list_for_each_entry_safe(c, next, &net->ipv6.mfc6_cache_array[line], list) { if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { write_lock_bh(&mrt_lock); - *cp = c->next; + list_del(&c->list); write_unlock_bh(&mrt_lock); ip6mr_cache_free(c); @@ -997,7 +992,9 @@ static struct notifier_block ip6_mr_notifier = { static int __net_init ip6mr_net_init(struct net *net) { + unsigned int i; int err = 0; + net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device), GFP_KERNEL); if (!net->ipv6.vif6_table) { @@ -1007,13 +1004,18 @@ static int __net_init ip6mr_net_init(struct net *net) /* Forwarding cache */ net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES, - sizeof(struct mfc6_cache *), + sizeof(struct list_head), GFP_KERNEL); if (!net->ipv6.mfc6_cache_array) { err = -ENOMEM; goto fail_mfc6_cache; } + for (i = 0; i < MFC6_LINES; i++) + INIT_LIST_HEAD(&net->ipv6.mfc6_cache_array[i]); + + INIT_LIST_HEAD(&net->ipv6.mfc6_unres_queue); + setup_timer(&net->ipv6.ipmr_expire_timer, ipmr_expire_process, (unsigned long)net); @@ -1105,8 +1107,9 @@ void ip6_mr_cleanup(void) 
static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) { + bool found = false; int line; - struct mfc6_cache *uc, *c, **cp; + struct mfc6_cache *uc, *c; unsigned char ttls[MAXMIFS]; int i; @@ -1122,14 +1125,15 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); - for (cp = &net->ipv6.mfc6_cache_array[line]; - (c = *cp) != NULL; cp = &c->next) { + list_for_each_entry(c, &net->ipv6.mfc6_cache_array[line], list) { if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && - ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) + ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { + found = true; break; + } } - if (c != NULL) { + if (found) { write_lock_bh(&mrt_lock); c->mf6c_parent = mfc->mf6cc_parent; ip6mr_update_thresholds(net, c, ttls); @@ -1154,29 +1158,29 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) c->mfc_flags |= MFC_STATIC; write_lock_bh(&mrt_lock); - c->next = net->ipv6.mfc6_cache_array[line]; - net->ipv6.mfc6_cache_array[line] = c; + list_add(&c->list, &net->ipv6.mfc6_cache_array[line]); write_unlock_bh(&mrt_lock); /* * Check to see if we resolved a queued list. If so we * need to send on the frames and tidy up. */ + found = false; spin_lock_bh(&mfc_unres_lock); - for (cp = &net->ipv6.mfc6_unres_queue; (uc = *cp) != NULL; - cp = &uc->next) { + list_for_each_entry(uc, &net->ipv6.mfc6_unres_queue, list) { if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { - *cp = uc->next; + list_del(&uc->list); atomic_dec(&net->ipv6.cache_resolve_queue_len); + found = true; break; } } - if (net->ipv6.mfc6_unres_queue == NULL) + if (list_empty(&net->ipv6.mfc6_unres_queue)) del_timer(&net->ipv6.ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); - if (uc) { + if (found) { ip6mr_cache_resolve(net, uc, c); ip6mr_cache_free(uc); } @@ -1191,6 +1195,7 @@ static void mroute_clean_tables(struct net *net) { int i; LIST_HEAD(list); + struct mfc6_cache *c, *next; /* * Shut down all active vif entries @@ -1205,16 +1210,11 @@ static void mroute_clean_tables(struct net *net) * Wipe the cache */ for (i = 0; i < MFC6_LINES; i++) { - struct mfc6_cache *c, **cp; - - cp = &net->ipv6.mfc6_cache_array[i]; - while ((c = *cp) != NULL) { - if (c->mfc_flags & MFC_STATIC) { - cp = &c->next; + list_for_each_entry_safe(c, next, &net->ipv6.mfc6_cache_array[i], list) { + if (c->mfc_flags & MFC_STATIC) continue; - } write_lock_bh(&mrt_lock); - *cp = c->next; + list_del(&c->list); write_unlock_bh(&mrt_lock); ip6mr_cache_free(c); @@ -1222,12 +1222,9 @@ static void mroute_clean_tables(struct net *net) } if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) { - struct mfc6_cache *c, **cp; - spin_lock_bh(&mfc_unres_lock); - cp = &net->ipv6.mfc6_unres_queue; - while ((c = *cp) != NULL) { - *cp = c->next; + list_for_each_entry_safe(c, next, &net->ipv6.mfc6_unres_queue, list) { + list_del(&c->list); ip6mr_destroy_unres(net, c); } spin_unlock_bh(&mfc_unres_lock); -- cgit v1.2.3 From 6bd521433942d85e80f7a731a88cc91a327f38e0 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 11 May 2010 14:40:53 +0200 Subject: ipv6: ip6mr: move mroute data into seperate structure Signed-off-by: Patrick McHardy --- net/ipv6/ip6mr.c | 390 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 214 insertions(+), 176 deletions(-) (limited to 'net/ipv6') diff --git 
a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 08e09042ad1..9419fceeed4 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -51,6 +51,24 @@ #include #include +struct mr6_table { +#ifdef CONFIG_NET_NS + struct net *net; +#endif + struct sock *mroute6_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc6_unres_queue; + struct list_head mfc6_cache_array[MFC6_LINES]; + struct mif_device vif6_table[MAXMIFS]; + int maxvif; + atomic_t cache_resolve_queue_len; + int mroute_do_assert; + int mroute_do_pim; +#ifdef CONFIG_IPV6_PIMSM_V2 + int mroute_reg_vif_num; +#endif +}; + /* Big lock, protecting vif table, mrt cache and mroute socket state. Note that the changes are semaphored via rtnl_lock. */ @@ -61,7 +79,7 @@ static DEFINE_RWLOCK(mrt_lock); * Multicast router control variables */ -#define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL) +#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL) /* Special spinlock for queue of unresolved entries */ static DEFINE_SPINLOCK(mfc_unres_lock); @@ -76,13 +94,13 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; -static int ip6_mr_forward(struct net *net, struct sk_buff *skb, - struct mfc6_cache *cache); -static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, +static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, + struct sk_buff *skb, struct mfc6_cache *cache); +static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, mifi_t mifi, int assert); -static int ip6mr_fill_mroute(struct net *net, struct sk_buff *skb, +static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm); -static void mroute_clean_tables(struct net *net); +static void mroute_clean_tables(struct mr6_table *mrt); #ifdef CONFIG_PROC_FS @@ -97,11 +115,12 @@ struct ipmr_mfc_iter { static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, struct ipmr_mfc_iter *it, loff_t pos) { + struct mr6_table *mrt = net->ipv6.mrt6; struct mfc6_cache *mfc; read_lock(&mrt_lock); for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) { - it->cache = &net->ipv6.mfc6_cache_array[it->ct]; + it->cache = &mrt->mfc6_cache_array[it->ct]; list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; @@ -109,7 +128,7 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, read_unlock(&mrt_lock); spin_lock_bh(&mfc_unres_lock); - it->cache = &net->ipv6.mfc6_unres_queue; + it->cache = &mrt->mfc6_unres_queue; list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; @@ -132,11 +151,13 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net, struct ipmr_vif_iter *iter, loff_t pos) { - for (iter->ct = 0; iter->ct < net->ipv6.maxvif; ++iter->ct) { - if (!MIF_EXISTS(net, iter->ct)) + struct mr6_table *mrt = net->ipv6.mrt6; + + for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { + if (!MIF_EXISTS(mrt, iter->ct)) continue; if (pos-- == 0) - return &net->ipv6.vif6_table[iter->ct]; + return &mrt->vif6_table[iter->ct]; } return NULL; } @@ -155,15 +176,16 @@ static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); + struct mr6_table *mrt = net->ipv6.mrt6; ++*pos; if (v == SEQ_START_TOKEN) return ip6mr_vif_seq_idx(net, iter, 0); - while (++iter->ct < net->ipv6.maxvif) { - if (!MIF_EXISTS(net, iter->ct)) + while (++iter->ct < mrt->maxvif) { + if (!MIF_EXISTS(mrt, iter->ct)) continue; - return 
&net->ipv6.vif6_table[iter->ct]; + return &mrt->vif6_table[iter->ct]; } return NULL; } @@ -177,6 +199,7 @@ static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v) static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) { struct net *net = seq_file_net(seq); + struct mr6_table *mrt = net->ipv6.mrt6; if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -187,7 +210,7 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%2td %-10s %8ld %7ld %8ld %7ld %05X\n", - vif - net->ipv6.vif6_table, + vif - mrt->vif6_table, name, vif->bytes_in, vif->pkt_in, vif->bytes_out, vif->pkt_out, vif->flags); @@ -229,6 +252,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct mfc6_cache *mfc = v; struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); + struct mr6_table *mrt = net->ipv6.mrt6; ++*pos; @@ -238,13 +262,13 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (mfc->list.next != it->cache) return list_entry(mfc->list.next, struct mfc6_cache, list); - if (it->cache == &net->ipv6.mfc6_unres_queue) + if (it->cache == &mrt->mfc6_unres_queue) goto end_of_list; - BUG_ON(it->cache != &net->ipv6.mfc6_cache_array[it->ct]); + BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]); while (++it->ct < MFC6_LINES) { - it->cache = &net->ipv6.mfc6_cache_array[it->ct]; + it->cache = &mrt->mfc6_cache_array[it->ct]; if (list_empty(it->cache)) continue; return list_first_entry(it->cache, struct mfc6_cache, list); @@ -252,7 +276,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) /* exhausted cache_array, show unresolved */ read_unlock(&mrt_lock); - it->cache = &net->ipv6.mfc6_unres_queue; + it->cache = &mrt->mfc6_unres_queue; it->ct = 0; spin_lock_bh(&mfc_unres_lock); @@ -270,10 +294,11 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) { struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); + struct mr6_table *mrt = net->ipv6.mrt6; - if (it->cache == &net->ipv6.mfc6_unres_queue) + if (it->cache == &mrt->mfc6_unres_queue) spin_unlock_bh(&mfc_unres_lock); - else if (it->cache == net->ipv6.mfc6_cache_array) + else if (it->cache == mrt->mfc6_cache_array) read_unlock(&mrt_lock); } @@ -281,6 +306,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) { int n; struct net *net = seq_file_net(seq); + struct mr6_table *mrt = net->ipv6.mrt6; if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -295,14 +321,14 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, mfc->mf6c_parent); - if (it->cache != &net->ipv6.mfc6_unres_queue) { + if (it->cache != &mrt->mfc6_unres_queue) { seq_printf(seq, " %8lu %8lu %8lu", mfc->mfc_un.res.pkt, mfc->mfc_un.res.bytes, mfc->mfc_un.res.wrong_if); for (n = mfc->mfc_un.res.minvif; n < mfc->mfc_un.res.maxvif; n++) { - if (MIF_EXISTS(net, n) && + if (MIF_EXISTS(mrt, n) && mfc->mfc_un.res.ttls[n] < 255) seq_printf(seq, " %2d:%-3d", @@ -349,7 +375,8 @@ static int pim6_rcv(struct sk_buff *skb) struct ipv6hdr *encap; struct net_device *reg_dev = NULL; struct net *net = dev_net(skb->dev); - int reg_vif_num = net->ipv6.mroute_reg_vif_num; + struct mr6_table *mrt = net->ipv6.mrt6; + int reg_vif_num = mrt->mroute_reg_vif_num; if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) goto drop; @@ -374,7 +401,7 @@ static int pim6_rcv(struct sk_buff *skb) read_lock(&mrt_lock); if (reg_vif_num >= 0) - reg_dev = net->ipv6.vif6_table[reg_vif_num].dev; + reg_dev = mrt->vif6_table[reg_vif_num].dev; 
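/*
 * Illustrative aside (not part of the patch): condensed sketch of how this
 * patch ties the new struct mr6_table back to its namespace.  write_pnet()
 * and read_pnet() compile away when CONFIG_NET_NS is disabled; identifiers
 * match the hunks in this series, the sketch_* helpers are hypothetical.
 */
static struct mr6_table *sketch_alloc_table(struct net *net)
{
	struct mr6_table *mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);

	if (mrt == NULL)
		return NULL;
	write_pnet(&mrt->net, net);	/* remember the owning namespace */
	net->ipv6.mrt6 = mrt;		/* a single table per namespace at this point */
	return mrt;
}

static struct net *sketch_table_net(struct mr6_table *mrt)
{
	/* code that only holds the table (e.g. ip6mr_destroy_unres below)
	 * can still recover the namespace when it needs it */
	return read_pnet(&mrt->net);
}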
if (reg_dev) dev_hold(reg_dev); read_unlock(&mrt_lock); @@ -411,12 +438,12 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) { struct net *net = dev_net(dev); + struct mr6_table *mrt = net->ipv6.mrt6; read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; - ip6mr_cache_report(net, skb, net->ipv6.mroute_reg_vif_num, - MRT6MSG_WHOLEPKT); + ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT); read_unlock(&mrt_lock); kfree_skb(skb); return NETDEV_TX_OK; @@ -472,15 +499,16 @@ failure: * Delete a VIF entry */ -static int mif6_delete(struct net *net, int vifi, struct list_head *head) +static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) { struct mif_device *v; struct net_device *dev; struct inet6_dev *in6_dev; - if (vifi < 0 || vifi >= net->ipv6.maxvif) + + if (vifi < 0 || vifi >= mrt->maxvif) return -EADDRNOTAVAIL; - v = &net->ipv6.vif6_table[vifi]; + v = &mrt->vif6_table[vifi]; write_lock_bh(&mrt_lock); dev = v->dev; @@ -492,17 +520,17 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head) } #ifdef CONFIG_IPV6_PIMSM_V2 - if (vifi == net->ipv6.mroute_reg_vif_num) - net->ipv6.mroute_reg_vif_num = -1; + if (vifi == mrt->mroute_reg_vif_num) + mrt->mroute_reg_vif_num = -1; #endif - if (vifi + 1 == net->ipv6.maxvif) { + if (vifi + 1 == mrt->maxvif) { int tmp; for (tmp = vifi - 1; tmp >= 0; tmp--) { - if (MIF_EXISTS(net, tmp)) + if (MIF_EXISTS(mrt, tmp)) break; } - net->ipv6.maxvif = tmp + 1; + mrt->maxvif = tmp + 1; } write_unlock_bh(&mrt_lock); @@ -529,11 +557,12 @@ static inline void ip6mr_cache_free(struct mfc6_cache *c) and reporting error to netlink readers. */ -static void ip6mr_destroy_unres(struct net *net, struct mfc6_cache *c) +static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c) { + struct net *net = read_pnet(&mrt->net); struct sk_buff *skb; - atomic_dec(&net->ipv6.cache_resolve_queue_len); + atomic_dec(&mrt->cache_resolve_queue_len); while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) { if (ipv6_hdr(skb)->version == 0) { @@ -553,13 +582,13 @@ static void ip6mr_destroy_unres(struct net *net, struct mfc6_cache *c) /* Timer process for all the unresolved queue. */ -static void ipmr_do_expire_process(struct net *net) +static void ipmr_do_expire_process(struct mr6_table *mrt) { unsigned long now = jiffies; unsigned long expires = 10 * HZ; struct mfc6_cache *c, *next; - list_for_each_entry_safe(c, next, &net->ipv6.mfc6_unres_queue, list) { + list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) { if (time_after(c->mfc_un.unres.expires, now)) { /* not yet... 
*/ unsigned long interval = c->mfc_un.unres.expires - now; @@ -569,31 +598,31 @@ static void ipmr_do_expire_process(struct net *net) } list_del(&c->list); - ip6mr_destroy_unres(net, c); + ip6mr_destroy_unres(mrt, c); } - if (!list_empty(&net->ipv6.mfc6_unres_queue)) - mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + expires); + if (!list_empty(&mrt->mfc6_unres_queue)) + mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); } static void ipmr_expire_process(unsigned long arg) { - struct net *net = (struct net *)arg; + struct mr6_table *mrt = (struct mr6_table *)arg; if (!spin_trylock(&mfc_unres_lock)) { - mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + 1); + mod_timer(&mrt->ipmr_expire_timer, jiffies + 1); return; } - if (!list_empty(&net->ipv6.mfc6_unres_queue)) - ipmr_do_expire_process(net); + if (!list_empty(&mrt->mfc6_unres_queue)) + ipmr_do_expire_process(mrt); spin_unlock(&mfc_unres_lock); } /* Fill oifs list. It is called under write locked mrt_lock. */ -static void ip6mr_update_thresholds(struct net *net, struct mfc6_cache *cache, +static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache, unsigned char *ttls) { int vifi; @@ -602,8 +631,8 @@ static void ip6mr_update_thresholds(struct net *net, struct mfc6_cache *cache, cache->mfc_un.res.maxvif = 0; memset(cache->mfc_un.res.ttls, 255, MAXMIFS); - for (vifi = 0; vifi < net->ipv6.maxvif; vifi++) { - if (MIF_EXISTS(net, vifi) && + for (vifi = 0; vifi < mrt->maxvif; vifi++) { + if (MIF_EXISTS(mrt, vifi) && ttls[vifi] && ttls[vifi] < 255) { cache->mfc_un.res.ttls[vifi] = ttls[vifi]; if (cache->mfc_un.res.minvif > vifi) @@ -614,16 +643,17 @@ static void ip6mr_update_thresholds(struct net *net, struct mfc6_cache *cache, } } -static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) +static int mif6_add(struct net *net, struct mr6_table *mrt, + struct mif6ctl *vifc, int mrtsock) { int vifi = vifc->mif6c_mifi; - struct mif_device *v = &net->ipv6.vif6_table[vifi]; + struct mif_device *v = &mrt->vif6_table[vifi]; struct net_device *dev; struct inet6_dev *in6_dev; int err; /* Is vif busy ? 
*/ - if (MIF_EXISTS(net, vifi)) + if (MIF_EXISTS(mrt, vifi)) return -EADDRINUSE; switch (vifc->mif6c_flags) { @@ -633,7 +663,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) * Special Purpose VIF in PIM * All the packets will be sent to the daemon */ - if (net->ipv6.mroute_reg_vif_num >= 0) + if (mrt->mroute_reg_vif_num >= 0) return -EADDRINUSE; dev = ip6mr_reg_vif(net); if (!dev) @@ -685,22 +715,22 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) v->dev = dev; #ifdef CONFIG_IPV6_PIMSM_V2 if (v->flags & MIFF_REGISTER) - net->ipv6.mroute_reg_vif_num = vifi; + mrt->mroute_reg_vif_num = vifi; #endif - if (vifi + 1 > net->ipv6.maxvif) - net->ipv6.maxvif = vifi + 1; + if (vifi + 1 > mrt->maxvif) + mrt->maxvif = vifi + 1; write_unlock_bh(&mrt_lock); return 0; } -static struct mfc6_cache *ip6mr_cache_find(struct net *net, +static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt, struct in6_addr *origin, struct in6_addr *mcastgrp) { int line = MFC6_HASH(mcastgrp, origin); struct mfc6_cache *c; - list_for_each_entry(c, &net->ipv6.mfc6_cache_array[line], list) { + list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) { if (ipv6_addr_equal(&c->mf6c_origin, origin) && ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) return c; @@ -734,8 +764,8 @@ static struct mfc6_cache *ip6mr_cache_alloc_unres(void) * A cache entry has gone into a resolved state from queued */ -static void ip6mr_cache_resolve(struct net *net, struct mfc6_cache *uc, - struct mfc6_cache *c) +static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt, + struct mfc6_cache *uc, struct mfc6_cache *c) { struct sk_buff *skb; @@ -748,7 +778,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mfc6_cache *uc, int err; struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); - if (ip6mr_fill_mroute(net, skb, c, NLMSG_DATA(nlh)) > 0) { + if (ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; } else { nlh->nlmsg_type = NLMSG_ERROR; @@ -758,7 +788,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mfc6_cache *uc, } err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid); } else - ip6_mr_forward(net, skb, c); + ip6_mr_forward(net, mrt, skb, c); } } @@ -769,8 +799,8 @@ static void ip6mr_cache_resolve(struct net *net, struct mfc6_cache *uc, * Called under mrt_lock. 
*/ -static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, - int assert) +static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, + mifi_t mifi, int assert) { struct sk_buff *skb; struct mrt6msg *msg; @@ -806,7 +836,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, msg = (struct mrt6msg *)skb_transport_header(skb); msg->im6_mbz = 0; msg->im6_msgtype = MRT6MSG_WHOLEPKT; - msg->im6_mif = net->ipv6.mroute_reg_vif_num; + msg->im6_mif = mrt->mroute_reg_vif_num; msg->im6_pad = 0; ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); @@ -841,7 +871,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, skb->ip_summed = CHECKSUM_UNNECESSARY; } - if (net->ipv6.mroute6_sk == NULL) { + if (mrt->mroute6_sk == NULL) { kfree_skb(skb); return -EINVAL; } @@ -849,7 +879,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, /* * Deliver to user space multicast routing algorithms */ - ret = sock_queue_rcv_skb(net->ipv6.mroute6_sk, skb); + ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb); if (ret < 0) { if (net_ratelimit()) printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n"); @@ -864,14 +894,14 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, */ static int -ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) +ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb) { bool found = false; int err; struct mfc6_cache *c; spin_lock_bh(&mfc_unres_lock); - list_for_each_entry(c, &net->ipv6.mfc6_unres_queue, list) { + list_for_each_entry(c, &mrt->mfc6_unres_queue, list) { if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) { found = true; @@ -884,7 +914,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) * Create a new entry if allowable */ - if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 || + if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || (c = ip6mr_cache_alloc_unres()) == NULL) { spin_unlock_bh(&mfc_unres_lock); @@ -902,7 +932,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) /* * Reflect first query at pim6sd */ - err = ip6mr_cache_report(net, skb, mifi, MRT6MSG_NOCACHE); + err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE); if (err < 0) { /* If the report failed throw the cache entry out - Brad Parker @@ -914,10 +944,10 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) return err; } - atomic_inc(&net->ipv6.cache_resolve_queue_len); - list_add(&c->list, &net->ipv6.mfc6_unres_queue); + atomic_inc(&mrt->cache_resolve_queue_len); + list_add(&c->list, &mrt->mfc6_unres_queue); - ipmr_do_expire_process(net); + ipmr_do_expire_process(mrt); } /* @@ -939,14 +969,14 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) * MFC6 cache manipulation by user space */ -static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc) +static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc) { int line; struct mfc6_cache *c, *next; line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); - list_for_each_entry_safe(c, next, &net->ipv6.mfc6_cache_array[line], list) { + list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) { if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && 
ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { write_lock_bh(&mrt_lock); @@ -965,6 +995,7 @@ static int ip6mr_device_event(struct notifier_block *this, { struct net_device *dev = ptr; struct net *net = dev_net(dev); + struct mr6_table *mrt = net->ipv6.mrt6; struct mif_device *v; int ct; LIST_HEAD(list); @@ -972,10 +1003,10 @@ static int ip6mr_device_event(struct notifier_block *this, if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; - v = &net->ipv6.vif6_table[0]; - for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) { + v = &mrt->vif6_table[0]; + for (ct = 0; ct < mrt->maxvif; ct++, v++) { if (v->dev == dev) - mif6_delete(net, ct, &list); + mif6_delete(mrt, ct, &list); } unregister_netdevice_many(&list); @@ -992,35 +1023,28 @@ static struct notifier_block ip6_mr_notifier = { static int __net_init ip6mr_net_init(struct net *net) { + struct mr6_table *mrt; unsigned int i; int err = 0; - net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device), - GFP_KERNEL); - if (!net->ipv6.vif6_table) { + mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); + if (mrt == NULL) { err = -ENOMEM; goto fail; } - /* Forwarding cache */ - net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES, - sizeof(struct list_head), - GFP_KERNEL); - if (!net->ipv6.mfc6_cache_array) { - err = -ENOMEM; - goto fail_mfc6_cache; - } + write_pnet(&mrt->net, net); for (i = 0; i < MFC6_LINES; i++) - INIT_LIST_HEAD(&net->ipv6.mfc6_cache_array[i]); + INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]); - INIT_LIST_HEAD(&net->ipv6.mfc6_unres_queue); + INIT_LIST_HEAD(&mrt->mfc6_unres_queue); - setup_timer(&net->ipv6.ipmr_expire_timer, ipmr_expire_process, - (unsigned long)net); + setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, + (unsigned long)mrt); #ifdef CONFIG_IPV6_PIMSM_V2 - net->ipv6.mroute_reg_vif_num = -1; + mrt->mroute_reg_vif_num = -1; #endif #ifdef CONFIG_PROC_FS @@ -1030,30 +1054,31 @@ static int __net_init ip6mr_net_init(struct net *net) if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops)) goto proc_cache_fail; #endif + + net->ipv6.mrt6 = mrt; return 0; #ifdef CONFIG_PROC_FS proc_cache_fail: proc_net_remove(net, "ip6_mr_vif"); proc_vif_fail: - kfree(net->ipv6.mfc6_cache_array); + kfree(mrt); #endif -fail_mfc6_cache: - kfree(net->ipv6.vif6_table); fail: return err; } static void __net_exit ip6mr_net_exit(struct net *net) { + struct mr6_table *mrt = net->ipv6.mrt6; + #ifdef CONFIG_PROC_FS proc_net_remove(net, "ip6_mr_cache"); proc_net_remove(net, "ip6_mr_vif"); #endif - del_timer(&net->ipv6.ipmr_expire_timer); - mroute_clean_tables(net); - kfree(net->ipv6.mfc6_cache_array); - kfree(net->ipv6.vif6_table); + del_timer(&mrt->ipmr_expire_timer); + mroute_clean_tables(mrt); + kfree(mrt); } static struct pernet_operations ip6mr_net_ops = { @@ -1105,7 +1130,8 @@ void ip6_mr_cleanup(void) kmem_cache_destroy(mrt_cachep); } -static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) +static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt, + struct mf6cctl *mfc, int mrtsock) { bool found = false; int line; @@ -1125,7 +1151,7 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); - list_for_each_entry(c, &net->ipv6.mfc6_cache_array[line], list) { + list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) { if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { found = true; @@ -1136,7 +1162,7 @@ 
static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) if (found) { write_lock_bh(&mrt_lock); c->mf6c_parent = mfc->mf6cc_parent; - ip6mr_update_thresholds(net, c, ttls); + ip6mr_update_thresholds(mrt, c, ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_unlock_bh(&mrt_lock); @@ -1153,12 +1179,12 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) c->mf6c_origin = mfc->mf6cc_origin.sin6_addr; c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr; c->mf6c_parent = mfc->mf6cc_parent; - ip6mr_update_thresholds(net, c, ttls); + ip6mr_update_thresholds(mrt, c, ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_lock_bh(&mrt_lock); - list_add(&c->list, &net->ipv6.mfc6_cache_array[line]); + list_add(&c->list, &mrt->mfc6_cache_array[line]); write_unlock_bh(&mrt_lock); /* @@ -1167,21 +1193,21 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) */ found = false; spin_lock_bh(&mfc_unres_lock); - list_for_each_entry(uc, &net->ipv6.mfc6_unres_queue, list) { + list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) { if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { list_del(&uc->list); - atomic_dec(&net->ipv6.cache_resolve_queue_len); + atomic_dec(&mrt->cache_resolve_queue_len); found = true; break; } } - if (list_empty(&net->ipv6.mfc6_unres_queue)) - del_timer(&net->ipv6.ipmr_expire_timer); + if (list_empty(&mrt->mfc6_unres_queue)) + del_timer(&mrt->ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); if (found) { - ip6mr_cache_resolve(net, uc, c); + ip6mr_cache_resolve(net, mrt, uc, c); ip6mr_cache_free(uc); } return 0; @@ -1191,7 +1217,7 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) * Close the multicast socket, and clear the vif tables etc */ -static void mroute_clean_tables(struct net *net) +static void mroute_clean_tables(struct mr6_table *mrt) { int i; LIST_HEAD(list); @@ -1200,9 +1226,9 @@ static void mroute_clean_tables(struct net *net) /* * Shut down all active vif entries */ - for (i = 0; i < net->ipv6.maxvif; i++) { - if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC)) - mif6_delete(net, i, &list); + for (i = 0; i < mrt->maxvif; i++) { + if (!(mrt->vif6_table[i].flags & VIFF_STATIC)) + mif6_delete(mrt, i, &list); } unregister_netdevice_many(&list); @@ -1210,7 +1236,7 @@ static void mroute_clean_tables(struct net *net) * Wipe the cache */ for (i = 0; i < MFC6_LINES; i++) { - list_for_each_entry_safe(c, next, &net->ipv6.mfc6_cache_array[i], list) { + list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) { if (c->mfc_flags & MFC_STATIC) continue; write_lock_bh(&mrt_lock); @@ -1221,25 +1247,25 @@ static void mroute_clean_tables(struct net *net) } } - if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) { + if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { spin_lock_bh(&mfc_unres_lock); - list_for_each_entry_safe(c, next, &net->ipv6.mfc6_unres_queue, list) { + list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) { list_del(&c->list); - ip6mr_destroy_unres(net, c); + ip6mr_destroy_unres(mrt, c); } spin_unlock_bh(&mfc_unres_lock); } } -static int ip6mr_sk_init(struct sock *sk) +static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk) { int err = 0; struct net *net = sock_net(sk); rtnl_lock(); write_lock_bh(&mrt_lock); - if (likely(net->ipv6.mroute6_sk == NULL)) { - net->ipv6.mroute6_sk = sk; + if (likely(mrt->mroute6_sk == NULL)) { + mrt->mroute6_sk = sk; 
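/*
 * Illustrative aside (not part of the patch): the recurring shape of the
 * user-facing entry points after this change — resolve the (for now single)
 * per-namespace table once, then operate purely on the mr6_table.  Only the
 * registered mroute socket, or a CAP_NET_ADMIN caller, may modify the
 * table's state.  sketch_entry() is hypothetical.
 */
static int sketch_entry(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr6_table *mrt = net->ipv6.mrt6;

	if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
		return -EACCES;
	/* ...work on mrt only from here on... */
	return 0;
}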
net->ipv6.devconf_all->mc_forwarding++; } else @@ -1255,15 +1281,16 @@ int ip6mr_sk_done(struct sock *sk) { int err = 0; struct net *net = sock_net(sk); + struct mr6_table *mrt = net->ipv6.mrt6; rtnl_lock(); - if (sk == net->ipv6.mroute6_sk) { + if (sk == mrt->mroute6_sk) { write_lock_bh(&mrt_lock); - net->ipv6.mroute6_sk = NULL; + mrt->mroute6_sk = NULL; net->ipv6.devconf_all->mc_forwarding--; write_unlock_bh(&mrt_lock); - mroute_clean_tables(net); + mroute_clean_tables(mrt); } else err = -EACCES; rtnl_unlock(); @@ -1271,6 +1298,13 @@ int ip6mr_sk_done(struct sock *sk) return err; } +struct sock *mroute6_socket(struct net *net) +{ + struct mr6_table *mrt = net->ipv6.mrt6; + + return mrt->mroute6_sk; +} + /* * Socket options and virtual interface manipulation. The whole * virtual interface system is a complete heap, but unfortunately @@ -1285,9 +1319,10 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns struct mf6cctl mfc; mifi_t mifi; struct net *net = sock_net(sk); + struct mr6_table *mrt = net->ipv6.mrt6; if (optname != MRT6_INIT) { - if (sk != net->ipv6.mroute6_sk && !capable(CAP_NET_ADMIN)) + if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN)) return -EACCES; } @@ -1299,7 +1334,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (optlen < sizeof(int)) return -EINVAL; - return ip6mr_sk_init(sk); + return ip6mr_sk_init(mrt, sk); case MRT6_DONE: return ip6mr_sk_done(sk); @@ -1312,7 +1347,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (vif.mif6c_mifi >= MAXMIFS) return -ENFILE; rtnl_lock(); - ret = mif6_add(net, &vif, sk == net->ipv6.mroute6_sk); + ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk); rtnl_unlock(); return ret; @@ -1322,7 +1357,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (copy_from_user(&mifi, optval, sizeof(mifi_t))) return -EFAULT; rtnl_lock(); - ret = mif6_delete(net, mifi, NULL); + ret = mif6_delete(mrt, mifi, NULL); rtnl_unlock(); return ret; @@ -1338,10 +1373,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns return -EFAULT; rtnl_lock(); if (optname == MRT6_DEL_MFC) - ret = ip6mr_mfc_delete(net, &mfc); + ret = ip6mr_mfc_delete(mrt, &mfc); else - ret = ip6mr_mfc_add(net, &mfc, - sk == net->ipv6.mroute6_sk); + ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk); rtnl_unlock(); return ret; @@ -1353,7 +1387,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns int v; if (get_user(v, (int __user *)optval)) return -EFAULT; - net->ipv6.mroute_do_assert = !!v; + mrt->mroute_do_assert = !!v; return 0; } @@ -1366,9 +1400,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns v = !!v; rtnl_lock(); ret = 0; - if (v != net->ipv6.mroute_do_pim) { - net->ipv6.mroute_do_pim = v; - net->ipv6.mroute_do_assert = v; + if (v != mrt->mroute_do_pim) { + mrt->mroute_do_pim = v; + mrt->mroute_do_assert = v; } rtnl_unlock(); return ret; @@ -1394,6 +1428,7 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int olr; int val; struct net *net = sock_net(sk); + struct mr6_table *mrt = net->ipv6.mrt6; switch (optname) { case MRT6_VERSION: @@ -1401,11 +1436,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, break; #ifdef CONFIG_IPV6_PIMSM_V2 case MRT6_PIM: - val = net->ipv6.mroute_do_pim; + val = mrt->mroute_do_pim; break; #endif case MRT6_ASSERT: - val = 
net->ipv6.mroute_do_assert; + val = mrt->mroute_do_assert; break; default: return -ENOPROTOOPT; @@ -1436,16 +1471,17 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) struct mif_device *vif; struct mfc6_cache *c; struct net *net = sock_net(sk); + struct mr6_table *mrt = net->ipv6.mrt6; switch (cmd) { case SIOCGETMIFCNT_IN6: if (copy_from_user(&vr, arg, sizeof(vr))) return -EFAULT; - if (vr.mifi >= net->ipv6.maxvif) + if (vr.mifi >= mrt->maxvif) return -EINVAL; read_lock(&mrt_lock); - vif = &net->ipv6.vif6_table[vr.mifi]; - if (MIF_EXISTS(net, vr.mifi)) { + vif = &mrt->vif6_table[vr.mifi]; + if (MIF_EXISTS(mrt, vr.mifi)) { vr.icount = vif->pkt_in; vr.ocount = vif->pkt_out; vr.ibytes = vif->bytes_in; @@ -1463,7 +1499,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) return -EFAULT; read_lock(&mrt_lock); - c = ip6mr_cache_find(net, &sr.src.sin6_addr, &sr.grp.sin6_addr); + c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr); if (c) { sr.pktcnt = c->mfc_un.res.pkt; sr.bytecnt = c->mfc_un.res.bytes; @@ -1493,11 +1529,11 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb) * Processing handlers for ip6mr_forward */ -static int ip6mr_forward2(struct net *net, struct sk_buff *skb, - struct mfc6_cache *c, int vifi) +static int ip6mr_forward2(struct net *net, struct mr6_table *mrt, + struct sk_buff *skb, struct mfc6_cache *c, int vifi) { struct ipv6hdr *ipv6h; - struct mif_device *vif = &net->ipv6.vif6_table[vifi]; + struct mif_device *vif = &mrt->vif6_table[vifi]; struct net_device *dev; struct dst_entry *dst; struct flowi fl; @@ -1511,7 +1547,7 @@ static int ip6mr_forward2(struct net *net, struct sk_buff *skb, vif->bytes_out += skb->len; vif->dev->stats.tx_bytes += skb->len; vif->dev->stats.tx_packets++; - ip6mr_cache_report(net, skb, vifi, MRT6MSG_WHOLEPKT); + ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT); goto out_free; } #endif @@ -1566,19 +1602,19 @@ out_free: return 0; } -static int ip6mr_find_vif(struct net_device *dev) +static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev) { - struct net *net = dev_net(dev); int ct; - for (ct = net->ipv6.maxvif - 1; ct >= 0; ct--) { - if (net->ipv6.vif6_table[ct].dev == dev) + + for (ct = mrt->maxvif - 1; ct >= 0; ct--) { + if (mrt->vif6_table[ct].dev == dev) break; } return ct; } -static int ip6_mr_forward(struct net *net, struct sk_buff *skb, - struct mfc6_cache *cache) +static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, + struct sk_buff *skb, struct mfc6_cache *cache) { int psend = -1; int vif, ct; @@ -1590,30 +1626,30 @@ static int ip6_mr_forward(struct net *net, struct sk_buff *skb, /* * Wrong interface: drop packet and (maybe) send PIM assert. */ - if (net->ipv6.vif6_table[vif].dev != skb->dev) { + if (mrt->vif6_table[vif].dev != skb->dev) { int true_vifi; cache->mfc_un.res.wrong_if++; - true_vifi = ip6mr_find_vif(skb->dev); + true_vifi = ip6mr_find_vif(mrt, skb->dev); - if (true_vifi >= 0 && net->ipv6.mroute_do_assert && + if (true_vifi >= 0 && mrt->mroute_do_assert && /* pimsm uses asserts, when switching from RPT to SPT, so that we cannot check that packet arrived on an oif. It is bad, but otherwise we would need to move pretty large chunk of pimd to kernel. Ough... 
--ANK */ - (net->ipv6.mroute_do_pim || + (mrt->mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && time_after(jiffies, cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { cache->mfc_un.res.last_assert = jiffies; - ip6mr_cache_report(net, skb, true_vifi, MRT6MSG_WRONGMIF); + ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF); } goto dont_forward; } - net->ipv6.vif6_table[vif].pkt_in++; - net->ipv6.vif6_table[vif].bytes_in += skb->len; + mrt->vif6_table[vif].pkt_in++; + mrt->vif6_table[vif].bytes_in += skb->len; /* * Forward the frame @@ -1623,13 +1659,13 @@ static int ip6_mr_forward(struct net *net, struct sk_buff *skb, if (psend != -1) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) - ip6mr_forward2(net, skb2, cache, psend); + ip6mr_forward2(net, mrt, skb2, cache, psend); } psend = ct; } } if (psend != -1) { - ip6mr_forward2(net, skb, cache, psend); + ip6mr_forward2(net, mrt, skb, cache, psend); return 0; } @@ -1647,9 +1683,10 @@ int ip6_mr_input(struct sk_buff *skb) { struct mfc6_cache *cache; struct net *net = dev_net(skb->dev); + struct mr6_table *mrt = net->ipv6.mrt6; read_lock(&mrt_lock); - cache = ip6mr_cache_find(net, + cache = ip6mr_cache_find(mrt, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr); /* @@ -1658,9 +1695,9 @@ int ip6_mr_input(struct sk_buff *skb) if (cache == NULL) { int vif; - vif = ip6mr_find_vif(skb->dev); + vif = ip6mr_find_vif(mrt, skb->dev); if (vif >= 0) { - int err = ip6mr_cache_unresolved(net, vif, skb); + int err = ip6mr_cache_unresolved(mrt, vif, skb); read_unlock(&mrt_lock); return err; @@ -1670,7 +1707,7 @@ int ip6_mr_input(struct sk_buff *skb) return -ENODEV; } - ip6_mr_forward(net, skb, cache); + ip6_mr_forward(net, mrt, skb, cache); read_unlock(&mrt_lock); @@ -1679,8 +1716,8 @@ int ip6_mr_input(struct sk_buff *skb) static int -ip6mr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc6_cache *c, - struct rtmsg *rtm) +ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, + struct mfc6_cache *c, struct rtmsg *rtm) { int ct; struct rtnexthop *nhp; @@ -1691,19 +1728,19 @@ ip6mr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc6_cache *c, if (c->mf6c_parent > MAXMIFS) return -ENOENT; - if (MIF_EXISTS(net, c->mf6c_parent)) - RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex); + if (MIF_EXISTS(mrt, c->mf6c_parent)) + RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex); mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { - if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { + if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) goto rtattr_failure; nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); nhp->rtnh_flags = 0; nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; - nhp->rtnh_ifindex = net->ipv6.vif6_table[ct].dev->ifindex; + nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex; nhp->rtnh_len = sizeof(*nhp); } } @@ -1721,11 +1758,12 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, int nowait) { int err; + struct mr6_table *mrt = net->ipv6.mrt6; struct mfc6_cache *cache; struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); read_lock(&mrt_lock); - cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); + cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); if (!cache) { struct sk_buff *skb2; @@ -1739,7 +1777,7 @@ int ip6mr_get_route(struct net 
*net, } dev = skb->dev; - if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) { + if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) { read_unlock(&mrt_lock); return -ENODEV; } @@ -1768,7 +1806,7 @@ int ip6mr_get_route(struct net *net, ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr); ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr); - err = ip6mr_cache_unresolved(net, vif, skb2); + err = ip6mr_cache_unresolved(mrt, vif, skb2); read_unlock(&mrt_lock); return err; @@ -1777,7 +1815,7 @@ int ip6mr_get_route(struct net *net, if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) cache->mfc_flags |= MFC_NOTIFY; - err = ip6mr_fill_mroute(net, skb, cache, rtm); + err = ip6mr_fill_mroute(mrt, skb, cache, rtm); read_unlock(&mrt_lock); return err; } -- cgit v1.2.3 From d1db275dd3f6e4182c4c4b4a1ac6287925d60569 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 11 May 2010 14:40:55 +0200 Subject: ipv6: ip6mr: support multiple tables This patch adds support for multiple independent multicast routing instances, named "tables". Userspace multicast routing daemons can bind to a specific table instance by issuing a setsockopt call using a new option MRT6_TABLE. The table number is stored in the raw socket data and affects all following ip6mr setsockopt(), getsockopt() and ioctl() calls. By default, a single table (RT6_TABLE_DFLT) is created with a default routing rule pointing to it. Newly created pim6reg devices have the table number appended ("pim6regX"), with the exception of devices created in the default table, which are named just "pim6reg" for compatibility reasons. Packets are directed to a specific table instance using routing rules, similar to how regular routing rules work. Currently iif, oif and mark are supported as keys; source and destination addresses could be supported additionally. Example usage: - bind pimd/xorp/... to a specific table: uint32_t table = 123; setsockopt(fd, SOL_IPV6, MRT6_TABLE, &table, sizeof(table)); - create routing rules directing packets to the new table: # ip -6 mrule add iif eth0 lookup 123 # ip -6 mrule add oif eth0 lookup 123 Signed-off-by: Patrick McHardy --- net/ipv6/Kconfig | 14 ++ net/ipv6/ip6_output.c | 2 +- net/ipv6/ip6mr.c | 428 ++++++++++++++++++++++++++++++++++++++++++-------- 3 files changed, 377 insertions(+), 67 deletions(-) (limited to 'net/ipv6') diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index a578096152a..36d7437ac05 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -229,6 +229,20 @@ config IPV6_MROUTE Experimental support for IPv6 multicast forwarding. If unsure, say N. +config IPV6_MROUTE_MULTIPLE_TABLES + bool "IPv6: multicast policy routing" + depends on IPV6_MROUTE + select FIB_RULES + help + Normally, a multicast router runs a userspace daemon and decides + what to do with a multicast packet based on the source and + destination addresses. If you say Y here, the multicast router + will also be able to take interfaces and packet marks into + account and run multiple instances of userspace daemons + simultaneously, each one handling a single table. + + If unsure, say N.
+ config IPV6_PIMSM_V2 bool "IPv6: PIM-SM version 2 support (EXPERIMENTAL)" depends on IPV6_MROUTE diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 5173acaeb50..cd963f64e27 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -108,7 +108,7 @@ static int ip6_finish_output2(struct sk_buff *skb) struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && - ((mroute6_socket(dev_net(dev)) && + ((mroute6_socket(dev_net(dev), skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 9419fceeed4..c2920a1a6db 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -52,9 +53,11 @@ #include struct mr6_table { + struct list_head list; #ifdef CONFIG_NET_NS struct net *net; #endif + u32 id; struct sock *mroute6_sk; struct timer_list ipmr_expire_timer; struct list_head mfc6_unres_queue; @@ -69,6 +72,14 @@ struct mr6_table { #endif }; +struct ip6mr_rule { + struct fib_rule common; +}; + +struct ip6mr_result { + struct mr6_table *mrt; +}; + /* Big lock, protecting vif table, mrt cache and mroute socket state. Note that the changes are semaphored via rtnl_lock. */ @@ -94,6 +105,9 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; +static struct mr6_table *ip6mr_new_table(struct net *net, u32 id); +static void ip6mr_free_table(struct mr6_table *mrt); + static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, struct sk_buff *skb, struct mfc6_cache *cache); static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, @@ -101,12 +115,220 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm); static void mroute_clean_tables(struct mr6_table *mrt); +static void ipmr_expire_process(unsigned long arg); + +#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES +#define ip6mr_for_each_table(mrt, met) \ + list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list) + +static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) +{ + struct mr6_table *mrt; + ip6mr_for_each_table(mrt, net) { + if (mrt->id == id) + return mrt; + } + return NULL; +} + +static int ip6mr_fib_lookup(struct net *net, struct flowi *flp, + struct mr6_table **mrt) +{ + struct ip6mr_result res; + struct fib_lookup_arg arg = { .result = &res, }; + int err; + + err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg); + if (err < 0) + return err; + *mrt = res.mrt; + return 0; +} + +static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp, + int flags, struct fib_lookup_arg *arg) +{ + struct ip6mr_result *res = arg->result; + struct mr6_table *mrt; + + switch (rule->action) { + case FR_ACT_TO_TBL: + break; + case FR_ACT_UNREACHABLE: + return -ENETUNREACH; + case FR_ACT_PROHIBIT: + return -EACCES; + case FR_ACT_BLACKHOLE: + default: + return -EINVAL; + } + + mrt = ip6mr_get_table(rule->fr_net, rule->table); + if (mrt == NULL) + return -EAGAIN; + res->mrt = mrt; + return 0; +} + +static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags) +{ + return 1; +} + +static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = { + FRA_GENERIC_POLICY, +}; + +static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, + struct fib_rule_hdr 
*frh, struct nlattr **tb) +{ + return 0; +} + +static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, + struct nlattr **tb) +{ + return 1; +} + +static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, + struct fib_rule_hdr *frh) +{ + frh->dst_len = 0; + frh->src_len = 0; + frh->tos = 0; + return 0; +} + +static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = { + .family = RTNL_FAMILY_IP6MR, + .rule_size = sizeof(struct ip6mr_rule), + .addr_size = sizeof(struct in6_addr), + .action = ip6mr_rule_action, + .match = ip6mr_rule_match, + .configure = ip6mr_rule_configure, + .compare = ip6mr_rule_compare, + .default_pref = fib_default_rule_pref, + .fill = ip6mr_rule_fill, + .nlgroup = RTNLGRP_IPV6_RULE, + .policy = ip6mr_rule_policy, + .owner = THIS_MODULE, +}; + +static int __net_init ip6mr_rules_init(struct net *net) +{ + struct fib_rules_ops *ops; + struct mr6_table *mrt; + int err; + + ops = fib_rules_register(&ip6mr_rules_ops_template, net); + if (IS_ERR(ops)) + return PTR_ERR(ops); + + INIT_LIST_HEAD(&net->ipv6.mr6_tables); + + mrt = ip6mr_new_table(net, RT6_TABLE_DFLT); + if (mrt == NULL) { + err = -ENOMEM; + goto err1; + } + + err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0); + if (err < 0) + goto err2; + + net->ipv6.mr6_rules_ops = ops; + return 0; + +err2: + kfree(mrt); +err1: + fib_rules_unregister(ops); + return err; +} + +static void __net_exit ip6mr_rules_exit(struct net *net) +{ + struct mr6_table *mrt, *next; + + list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) + ip6mr_free_table(mrt); + fib_rules_unregister(net->ipv6.mr6_rules_ops); +} +#else +#define ip6mr_for_each_table(mrt, net) \ + for (mrt = net->ipv6.mrt6; mrt; mrt = NULL) + +static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) +{ + return net->ipv6.mrt6; +} + +static int ip6mr_fib_lookup(struct net *net, struct flowi *flp, + struct mr6_table **mrt) +{ + *mrt = net->ipv6.mrt6; + return 0; +} + +static int __net_init ip6mr_rules_init(struct net *net) +{ + net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT); + return net->ipv6.mrt6 ? 
0 : -ENOMEM; +} + +static void __net_exit ip6mr_rules_exit(struct net *net) +{ + ip6mr_free_table(net->ipv6.mrt6); +} +#endif + +static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) +{ + struct mr6_table *mrt; + unsigned int i; + + mrt = ip6mr_get_table(net, id); + if (mrt != NULL) + return mrt; + + mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); + if (mrt == NULL) + return NULL; + mrt->id = id; + write_pnet(&mrt->net, net); + + /* Forwarding cache */ + for (i = 0; i < MFC6_LINES; i++) + INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]); + + INIT_LIST_HEAD(&mrt->mfc6_unres_queue); + + setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, + (unsigned long)mrt); + +#ifdef CONFIG_IPV6_PIMSM_V2 + mrt->mroute_reg_vif_num = -1; +#endif +#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES + list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables); +#endif + return mrt; +} + +static void ip6mr_free_table(struct mr6_table *mrt) +{ + del_timer(&mrt->ipmr_expire_timer); + mroute_clean_tables(mrt); + kfree(mrt); +} #ifdef CONFIG_PROC_FS struct ipmr_mfc_iter { struct seq_net_private p; + struct mr6_table *mrt; struct list_head *cache; int ct; }; @@ -115,7 +337,7 @@ struct ipmr_mfc_iter { static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, struct ipmr_mfc_iter *it, loff_t pos) { - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt = it->mrt; struct mfc6_cache *mfc; read_lock(&mrt_lock); @@ -144,6 +366,7 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, struct ipmr_vif_iter { struct seq_net_private p; + struct mr6_table *mrt; int ct; }; @@ -151,7 +374,7 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net, struct ipmr_vif_iter *iter, loff_t pos) { - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt = iter->mrt; for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { if (!MIF_EXISTS(mrt, iter->ct)) @@ -165,7 +388,15 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net, static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos) __acquires(mrt_lock) { + struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); + struct mr6_table *mrt; + + mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); + if (mrt == NULL) + return ERR_PTR(-ENOENT); + + iter->mrt = mrt; read_lock(&mrt_lock); return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1) @@ -176,7 +407,7 @@ static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt = iter->mrt; ++*pos; if (v == SEQ_START_TOKEN) @@ -198,8 +429,8 @@ static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v) static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) { - struct net *net = seq_file_net(seq); - struct mr6_table *mrt = net->ipv6.mrt6; + struct ipmr_vif_iter *iter = seq->private; + struct mr6_table *mrt = iter->mrt; if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -241,8 +472,15 @@ static const struct file_operations ip6mr_vif_fops = { static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) { + struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); + struct mr6_table *mrt; + + mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); + if (mrt == NULL) + return ERR_PTR(-ENOENT); + it->mrt = mrt; return *pos ? 
ipmr_mfc_seq_idx(net, seq->private, *pos - 1) : SEQ_START_TOKEN; } @@ -252,7 +490,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct mfc6_cache *mfc = v; struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt = it->mrt; ++*pos; @@ -293,8 +531,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) { struct ipmr_mfc_iter *it = seq->private; - struct net *net = seq_file_net(seq); - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt = it->mrt; if (it->cache == &mrt->mfc6_unres_queue) spin_unlock_bh(&mfc_unres_lock); @@ -305,8 +542,6 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) { int n; - struct net *net = seq_file_net(seq); - struct mr6_table *mrt = net->ipv6.mrt6; if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -316,6 +551,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) } else { const struct mfc6_cache *mfc = v; const struct ipmr_mfc_iter *it = seq->private; + struct mr6_table *mrt = it->mrt; seq_printf(seq, "%pI6 %pI6 %-3hd", &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, @@ -375,8 +611,12 @@ static int pim6_rcv(struct sk_buff *skb) struct ipv6hdr *encap; struct net_device *reg_dev = NULL; struct net *net = dev_net(skb->dev); - struct mr6_table *mrt = net->ipv6.mrt6; - int reg_vif_num = mrt->mroute_reg_vif_num; + struct mr6_table *mrt; + struct flowi fl = { + .iif = skb->dev->ifindex, + .mark = skb->mark, + }; + int reg_vif_num; if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) goto drop; @@ -399,6 +639,10 @@ static int pim6_rcv(struct sk_buff *skb) ntohs(encap->payload_len) + sizeof(*pim) > skb->len) goto drop; + if (ip6mr_fib_lookup(net, &fl, &mrt) < 0) + goto drop; + reg_vif_num = mrt->mroute_reg_vif_num; + read_lock(&mrt_lock); if (reg_vif_num >= 0) reg_dev = mrt->vif6_table[reg_vif_num].dev; @@ -438,7 +682,17 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) { struct net *net = dev_net(dev); - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt; + struct flowi fl = { + .oif = dev->ifindex, + .iif = skb->skb_iif, + .mark = skb->mark, + }; + int err; + + err = ip6mr_fib_lookup(net, &fl, &mrt); + if (err < 0) + return err; read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; @@ -463,11 +717,17 @@ static void reg_vif_setup(struct net_device *dev) dev->features |= NETIF_F_NETNS_LOCAL; } -static struct net_device *ip6mr_reg_vif(struct net *net) +static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt) { struct net_device *dev; + char name[IFNAMSIZ]; + + if (mrt->id == RT6_TABLE_DFLT) + sprintf(name, "pim6reg"); + else + sprintf(name, "pim6reg%u", mrt->id); - dev = alloc_netdev(0, "pim6reg", reg_vif_setup); + dev = alloc_netdev(0, name, reg_vif_setup); if (dev == NULL) return NULL; @@ -665,7 +925,7 @@ static int mif6_add(struct net *net, struct mr6_table *mrt, */ if (mrt->mroute_reg_vif_num >= 0) return -EADDRINUSE; - dev = ip6mr_reg_vif(net); + dev = ip6mr_reg_vif(net, mrt); if (!dev) return -ENOBUFS; err = dev_set_allmulti(dev, 1); @@ -995,7 +1255,7 @@ static int ip6mr_device_event(struct notifier_block *this, { struct net_device *dev = ptr; struct net *net = dev_net(dev); - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt; struct mif_device *v; int ct; LIST_HEAD(list); @@ -1003,10 +1263,12 @@ static 
int ip6mr_device_event(struct notifier_block *this, if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; - v = &mrt->vif6_table[0]; - for (ct = 0; ct < mrt->maxvif; ct++, v++) { - if (v->dev == dev) - mif6_delete(mrt, ct, &list); + ip6mr_for_each_table(mrt, net) { + v = &mrt->vif6_table[0]; + for (ct = 0; ct < mrt->maxvif; ct++, v++) { + if (v->dev == dev) + mif6_delete(mrt, ct, &list); + } } unregister_netdevice_many(&list); @@ -1023,29 +1285,11 @@ static struct notifier_block ip6_mr_notifier = { static int __net_init ip6mr_net_init(struct net *net) { - struct mr6_table *mrt; - unsigned int i; - int err = 0; + int err; - mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); - if (mrt == NULL) { - err = -ENOMEM; + err = ip6mr_rules_init(net); + if (err < 0) goto fail; - } - - write_pnet(&mrt->net, net); - - for (i = 0; i < MFC6_LINES; i++) - INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]); - - INIT_LIST_HEAD(&mrt->mfc6_unres_queue); - - setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, - (unsigned long)mrt); - -#ifdef CONFIG_IPV6_PIMSM_V2 - mrt->mroute_reg_vif_num = -1; -#endif #ifdef CONFIG_PROC_FS err = -ENOMEM; @@ -1055,14 +1299,13 @@ static int __net_init ip6mr_net_init(struct net *net) goto proc_cache_fail; #endif - net->ipv6.mrt6 = mrt; return 0; #ifdef CONFIG_PROC_FS proc_cache_fail: proc_net_remove(net, "ip6_mr_vif"); proc_vif_fail: - kfree(mrt); + ip6mr_rules_exit(net); #endif fail: return err; @@ -1070,15 +1313,11 @@ fail: static void __net_exit ip6mr_net_exit(struct net *net) { - struct mr6_table *mrt = net->ipv6.mrt6; - #ifdef CONFIG_PROC_FS proc_net_remove(net, "ip6_mr_cache"); proc_net_remove(net, "ip6_mr_vif"); #endif - del_timer(&mrt->ipmr_expire_timer); - mroute_clean_tables(mrt); - kfree(mrt); + ip6mr_rules_exit(net); } static struct pernet_operations ip6mr_net_ops = { @@ -1279,28 +1518,39 @@ static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk) int ip6mr_sk_done(struct sock *sk) { - int err = 0; + int err = -EACCES; struct net *net = sock_net(sk); - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt; rtnl_lock(); - if (sk == mrt->mroute6_sk) { - write_lock_bh(&mrt_lock); - mrt->mroute6_sk = NULL; - net->ipv6.devconf_all->mc_forwarding--; - write_unlock_bh(&mrt_lock); + ip6mr_for_each_table(mrt, net) { + if (sk == mrt->mroute6_sk) { + write_lock_bh(&mrt_lock); + mrt->mroute6_sk = NULL; + net->ipv6.devconf_all->mc_forwarding--; + write_unlock_bh(&mrt_lock); - mroute_clean_tables(mrt); - } else - err = -EACCES; + mroute_clean_tables(mrt); + err = 0; + break; + } + } rtnl_unlock(); return err; } -struct sock *mroute6_socket(struct net *net) +struct sock *mroute6_socket(struct net *net, struct sk_buff *skb) { - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt; + struct flowi fl = { + .iif = skb->skb_iif, + .oif = skb->dev->ifindex, + .mark = skb->mark, + }; + + if (ip6mr_fib_lookup(net, &fl, &mrt) < 0) + return NULL; return mrt->mroute6_sk; } @@ -1319,7 +1569,11 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns struct mf6cctl mfc; mifi_t mifi; struct net *net = sock_net(sk); - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt; + + mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? 
: RT6_TABLE_DFLT); + if (mrt == NULL) + return -ENOENT; if (optname != MRT6_INIT) { if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN)) @@ -1408,6 +1662,27 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns return ret; } +#endif +#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES + case MRT6_TABLE: + { + u32 v; + + if (optlen != sizeof(u32)) + return -EINVAL; + if (get_user(v, (u32 __user *)optval)) + return -EFAULT; + if (sk == mrt->mroute6_sk) + return -EBUSY; + + rtnl_lock(); + ret = 0; + if (!ip6mr_new_table(net, v)) + ret = -ENOMEM; + raw6_sk(sk)->ip6mr_table = v; + rtnl_unlock(); + return ret; + } #endif /* * Spurious command, or MRT6_VERSION which you cannot @@ -1428,7 +1703,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int olr; int val; struct net *net = sock_net(sk); - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt; + + mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); + if (mrt == NULL) + return -ENOENT; switch (optname) { case MRT6_VERSION: @@ -1471,7 +1750,11 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) struct mif_device *vif; struct mfc6_cache *c; struct net *net = sock_net(sk); - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt; + + mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); + if (mrt == NULL) + return -ENOENT; switch (cmd) { case SIOCGETMIFCNT_IN6: @@ -1683,7 +1966,16 @@ int ip6_mr_input(struct sk_buff *skb) { struct mfc6_cache *cache; struct net *net = dev_net(skb->dev); - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt; + struct flowi fl = { + .iif = skb->dev->ifindex, + .mark = skb->mark, + }; + int err; + + err = ip6mr_fib_lookup(net, &fl, &mrt); + if (err < 0) + return err; read_lock(&mrt_lock); cache = ip6mr_cache_find(mrt, @@ -1758,10 +2050,14 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, int nowait) { int err; - struct mr6_table *mrt = net->ipv6.mrt6; + struct mr6_table *mrt; struct mfc6_cache *cache; struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); + mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); + if (mrt == NULL) + return -ENOENT; + read_lock(&mrt_lock); cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); -- cgit v1.2.3 From 5b285cac3570a935aaa28312c1ea28f9e01c5452 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 11 May 2010 14:40:56 +0200 Subject: ipv6: ip6mr: add support for dumping routing tables over netlink The ip6mr /proc interface (ip6_mr_cache) can't be extended to dump routes from any tables but the main table in a backwards compatible fashion since the output format ends in a variable amount of output interfaces. Introduce a new netlink interface to dump multicast routes from all tables, similar to the netlink interface for regular routes. 
Signed-off-by: Patrick McHardy --- net/ipv6/ip6mr.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 89 insertions(+), 7 deletions(-) (limited to 'net/ipv6') diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index c2920a1a6db..163850e22b1 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -112,8 +112,10 @@ static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, struct sk_buff *skb, struct mfc6_cache *cache); static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, mifi_t mifi, int assert); -static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, - struct mfc6_cache *c, struct rtmsg *rtm); +static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, + struct mfc6_cache *c, struct rtmsg *rtm); +static int ip6mr_rtm_dumproute(struct sk_buff *skb, + struct netlink_callback *cb); static void mroute_clean_tables(struct mr6_table *mrt); static void ipmr_expire_process(unsigned long arg); @@ -1038,7 +1040,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt, int err; struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); - if (ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { + if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; } else { nlh->nlmsg_type = NLMSG_ERROR; @@ -1350,6 +1352,7 @@ int __init ip6_mr_init(void) goto add_proto_fail; } #endif + rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute); return 0; #ifdef CONFIG_IPV6_PIMSM_V2 add_proto_fail: @@ -2007,9 +2010,8 @@ int ip6_mr_input(struct sk_buff *skb) } -static int -ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, - struct mfc6_cache *c, struct rtmsg *rtm) +static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, + struct mfc6_cache *c, struct rtmsg *rtm) { int ct; struct rtnexthop *nhp; @@ -2111,8 +2113,88 @@ int ip6mr_get_route(struct net *net, if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) cache->mfc_flags |= MFC_NOTIFY; - err = ip6mr_fill_mroute(mrt, skb, cache, rtm); + err = __ip6mr_fill_mroute(mrt, skb, cache, rtm); read_unlock(&mrt_lock); return err; } +static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, + u32 pid, u32 seq, struct mfc6_cache *c) +{ + struct nlmsghdr *nlh; + struct rtmsg *rtm; + + nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); + if (nlh == NULL) + return -EMSGSIZE; + + rtm = nlmsg_data(nlh); + rtm->rtm_family = RTNL_FAMILY_IPMR; + rtm->rtm_dst_len = 128; + rtm->rtm_src_len = 128; + rtm->rtm_tos = 0; + rtm->rtm_table = mrt->id; + NLA_PUT_U32(skb, RTA_TABLE, mrt->id); + rtm->rtm_scope = RT_SCOPE_UNIVERSE; + rtm->rtm_protocol = RTPROT_UNSPEC; + rtm->rtm_flags = 0; + + NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin); + NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp); + + if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0) + goto nla_put_failure; + + return nlmsg_end(skb, nlh); + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct mr6_table *mrt; + struct mfc6_cache *mfc; + unsigned int t = 0, s_t; + unsigned int h = 0, s_h; + unsigned int e = 0, s_e; + + s_t = cb->args[0]; + s_h = cb->args[1]; + s_e = cb->args[2]; + + read_lock(&mrt_lock); + ip6mr_for_each_table(mrt, net) { + if (t < s_t) + goto next_table; + if (t > s_t) + s_h = 0; + for (h = s_h; h < MFC6_LINES; h++) { + 
list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) { + if (e < s_e) + goto next_entry; + if (ip6mr_fill_mroute(mrt, skb, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + mfc) < 0) + goto done; +next_entry: + e++; + } + e = s_e = 0; + } + s_h = 0; +next_table: + t++; + } +done: + read_unlock(&mrt_lock); + + cb->args[2] = e; + cb->args[1] = h; + cb->args[0] = t; + + return skb->len; +} -- cgit v1.2.3
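The two commit messages above only touch on the new userspace-visible interfaces. The fragment below is a minimal illustrative sketch, not taken from the patches, of how a multicast routing daemon might use them: it binds its mroute socket to table 123 via MRT6_TABLE before MRT6_INIT, then requests a dump of the multicast routing tables through the RTM_GETROUTE handler registered for RTNL_FAMILY_IP6MR. It assumes a kernel built with CONFIG_IPV6_MROUTE_MULTIPLE_TABLES and userspace headers that export MRT6_TABLE (<linux/mroute6.h>) and RTNL_FAMILY_IP6MR (<linux/rtnetlink.h>); the table number 123, the buffer size and the single recv() are simplifications, not a reference implementation.

/*
 * Illustrative userspace sketch, not part of the patches above.
 * Assumes MRT6_INIT/MRT6_DONE/MRT6_TABLE and RTNL_FAMILY_IP6MR are
 * available from the uapi headers, as introduced by this series.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	int mrt_fd, nl_fd;
	uint32_t table = 123;
	int one = 1;

	/* Become the multicast routing daemon for table 123. MRT6_TABLE has
	 * to be set before MRT6_INIT, because the table id is latched in the
	 * raw socket data and the call is refused with -EBUSY once the
	 * socket is already registered as a table's mroute6_sk. */
	mrt_fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	if (mrt_fd < 0)
		return 1;
	if (setsockopt(mrt_fd, SOL_IPV6, MRT6_TABLE, &table, sizeof(table)) < 0)
		perror("MRT6_TABLE (kernel without multiple-table support?)");
	if (setsockopt(mrt_fd, SOL_IPV6, MRT6_INIT, &one, sizeof(one)) < 0)
		perror("MRT6_INIT");

	/* Dump the IPv6 multicast routing tables over rtnetlink. The request
	 * carries RTNL_FAMILY_IP6MR in rtm_family; the kernel answers with
	 * one RTM_NEWROUTE message per cache entry, carrying the owning
	 * table id in rtm_table/RTA_TABLE. */
	nl_fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (nl_fd >= 0) {
		struct {
			struct nlmsghdr nlh;
			struct rtmsg rtm;
		} req;
		struct nlmsghdr *nlh;
		char buf[8192];
		ssize_t len;

		memset(&req, 0, sizeof(req));
		req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
		req.nlh.nlmsg_type = RTM_GETROUTE;
		req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
		req.nlh.nlmsg_seq = 1;
		req.rtm.rtm_family = RTNL_FAMILY_IP6MR;

		if (send(nl_fd, &req, req.nlh.nlmsg_len, 0) >= 0) {
			/* One recv() is enough for a small table; a real
			 * daemon would loop until it sees NLMSG_DONE. */
			len = recv(nl_fd, buf, sizeof(buf), 0);
			for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
			     nlh = NLMSG_NEXT(nlh, len)) {
				if (nlh->nlmsg_type == NLMSG_DONE)
					break;
				if (nlh->nlmsg_type == RTM_NEWROUTE)
					printf("mfc entry in table %d\n",
					       ((struct rtmsg *)NLMSG_DATA(nlh))->rtm_table);
			}
		}
		close(nl_fd);
	}

	setsockopt(mrt_fd, SOL_IPV6, MRT6_DONE, NULL, 0);
	close(mrt_fd);
	return 0;
}

On a kernel built without CONFIG_IPV6_MROUTE_MULTIPLE_TABLES the MRT6_TABLE setsockopt is expected to fall through to the default case and fail with ENOPROTOOPT, in which case the daemon simply operates on the single default table (RT6_TABLE_DFLT).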