author     Linus Torvalds <torvalds@linux-foundation.org>    2008-12-28 12:49:40 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-12-28 12:49:40 -0800
commit     0191b625ca5a46206d2fb862bb08f36f2fcb3b31 (patch)
tree       454d1842b1833d976da62abcbd5c47521ebe9bd7 /net/ipv4
parent     54a696bd07c14d3b1192d03ce7269bc59b45209a (diff)
parent     eb56092fc168bf5af199d47af50c0d84a96db898 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1429 commits)
  net: Allow dependancies of FDDI & Tokenring to be modular.
  igb: Fix build warning when DCA is disabled.
  net: Fix warning fallout from recent NAPI interface changes.
  gro: Fix potential use after free
  sfc: If AN is enabled, always read speed/duplex from the AN advertising bits
  sfc: When disabling the NIC, close the device rather than unregistering it
  sfc: SFT9001: Add cable diagnostics
  sfc: Add support for multiple PHY self-tests
  sfc: Merge top-level functions for self-tests
  sfc: Clean up PHY mode management in loopback self-test
  sfc: Fix unreliable link detection in some loopback modes
  sfc: Generate unique names for per-NIC workqueues
  802.3ad: use standard ethhdr instead of ad_header
  802.3ad: generalize out mac address initializer
  802.3ad: initialize ports LACPDU from const initializer
  802.3ad: remove typedef around ad_system
  802.3ad: turn ports is_individual into a bool
  802.3ad: turn ports is_enabled into a bool
  802.3ad: make ntt bool
  ixgbe: Fix set_ringparam in ixgbe to use the same memory pools.
  ...

Fixed trivial IPv4/6 address printing conflicts in fs/cifs/connect.c due to the conversion to %pI (in this networking merge) and the addition of doing IPv6 addresses (from the earlier merge of CIFS).
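Much of the mechanical churn in the diff below comes from this %pI conversion. As a minimal sketch (kernel context assumed; not part of this commit), the old NIPQUAD style expanded an IPv4 address into four byte-sized printk arguments, while the new %pI4 extension takes a single pointer to the big-endian address:

    __be32 addr = iph->daddr;

    /* before: NIPQUAD_FMT is "%u.%u.%u.%u" and NIPQUAD() expands to four arguments */
    printk(KERN_INFO "addr " NIPQUAD_FMT "\n", NIPQUAD(addr));

    /* after: one pointer argument, formatted by the %pI4 printk extension */
    printk(KERN_INFO "addr %pI4\n", &addr);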
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/af_inet.c | 116
-rw-r--r--  net/ipv4/ah4.c | 10
-rw-r--r--  net/ipv4/arp.c | 36
-rw-r--r--  net/ipv4/cipso_ipv4.c | 1
-rw-r--r--  net/ipv4/devinet.c | 19
-rw-r--r--  net/ipv4/esp4.c | 8
-rw-r--r--  net/ipv4/fib_frontend.c | 10
-rw-r--r--  net/ipv4/fib_hash.c | 12
-rw-r--r--  net/ipv4/fib_semantics.c | 8
-rw-r--r--  net/ipv4/fib_trie.c | 6
-rw-r--r--  net/ipv4/icmp.c | 39
-rw-r--r--  net/ipv4/igmp.c | 95
-rw-r--r--  net/ipv4/inet_connection_sock.c | 31
-rw-r--r--  net/ipv4/inet_diag.c | 31
-rw-r--r--  net/ipv4/inet_hashtables.c | 277
-rw-r--r--  net/ipv4/inet_lro.c | 4
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 48
-rw-r--r--  net/ipv4/inetpeer.c | 2
-rw-r--r--  net/ipv4/ip_forward.c | 2
-rw-r--r--  net/ipv4/ip_fragment.c | 21
-rw-r--r--  net/ipv4/ip_gre.c | 58
-rw-r--r--  net/ipv4/ip_input.c | 10
-rw-r--r--  net/ipv4/ip_output.c | 24
-rw-r--r--  net/ipv4/ip_sockglue.c | 72
-rw-r--r--  net/ipv4/ipcomp.c | 10
-rw-r--r--  net/ipv4/ipconfig.c | 40
-rw-r--r--  net/ipv4/ipip.c | 37
-rw-r--r--  net/ipv4/ipmr.c | 280
-rw-r--r--  net/ipv4/netfilter.c | 7
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 16
-rw-r--r--  net/ipv4/netfilter/arptable_filter.c | 12
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c | 7
-rw-r--r--  net/ipv4/netfilter/ipt_addrtype.c | 16
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 27
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_h323.c | 58
-rw-r--r--  net/ipv4/netfilter/nf_nat_irc.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c | 27
-rw-r--r--  net/ipv4/netfilter/nf_nat_sip.c | 18
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 9
-rw-r--r--  net/ipv4/proc.c | 8
-rw-r--r--  net/ipv4/raw.c | 10
-rw-r--r--  net/ipv4/route.c | 227
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 188
-rw-r--r--  net/ipv4/tcp.c | 130
-rw-r--r--  net/ipv4/tcp_cubic.c | 120
-rw-r--r--  net/ipv4/tcp_diag.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 511
-rw-r--r--  net/ipv4/tcp_ipv4.c | 137
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 219
-rw-r--r--  net/ipv4/tcp_probe.c | 7
-rw-r--r--  net/ipv4/tcp_timer.c | 14
-rw-r--r--  net/ipv4/tcp_yeah.c | 4
-rw-r--r--  net/ipv4/udp.c | 271
-rw-r--r--  net/ipv4/udp_impl.h | 4
-rw-r--r--  net/ipv4/udplite.c | 14
-rw-r--r--  net/ipv4/xfrm4_input.c | 4
-rw-r--r--  net/ipv4/xfrm4_policy.c | 15
-rw-r--r--  net/ipv4/xfrm4_state.c | 2
62 files changed, 2136 insertions, 1286 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1aa2dc9e380..743f5542d65 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -94,6 +94,7 @@
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
+#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
@@ -245,7 +246,7 @@ static inline int inet_netns_ok(struct net *net, int protocol)
int hash;
struct net_protocol *ipprot;
- if (net == &init_net)
+ if (net_eq(net, &init_net))
return 1;
hash = protocol & (MAX_INET_PROTOS - 1);
@@ -272,10 +273,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol)
int try_loading_module = 0;
int err;
- if (sock->type != SOCK_RAW &&
- sock->type != SOCK_DGRAM &&
- !inet_ehash_secret)
- build_ehash_secret();
+ if (unlikely(!inet_ehash_secret))
+ if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
+ build_ehash_secret();
sock->state = SS_UNCONNECTED;
@@ -1070,11 +1070,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
return 0;
if (sysctl_ip_dynaddr > 1) {
- printk(KERN_INFO "%s(): shifting inet->"
- "saddr from " NIPQUAD_FMT " to " NIPQUAD_FMT "\n",
- __func__,
- NIPQUAD(old_saddr),
- NIPQUAD(new_saddr));
+ printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n",
+ __func__, &old_saddr, &new_saddr);
}
inet->saddr = inet->rcv_saddr = new_saddr;
@@ -1245,6 +1242,100 @@ out:
return segs;
}
+static struct sk_buff **inet_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct net_protocol *ops;
+ struct sk_buff **pp = NULL;
+ struct sk_buff *p;
+ struct iphdr *iph;
+ int flush = 1;
+ int proto;
+ int id;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+ goto out;
+
+ iph = ip_hdr(skb);
+ proto = iph->protocol & (MAX_INET_PROTOS - 1);
+
+ rcu_read_lock();
+ ops = rcu_dereference(inet_protos[proto]);
+ if (!ops || !ops->gro_receive)
+ goto out_unlock;
+
+ if (iph->version != 4 || iph->ihl != 5)
+ goto out_unlock;
+
+ if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+ goto out_unlock;
+
+ flush = ntohs(iph->tot_len) != skb->len ||
+ iph->frag_off != htons(IP_DF);
+ id = ntohs(iph->id);
+
+ for (p = *head; p; p = p->next) {
+ struct iphdr *iph2;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ iph2 = ip_hdr(p);
+
+ if (iph->protocol != iph2->protocol ||
+ iph->tos != iph2->tos ||
+ memcmp(&iph->saddr, &iph2->saddr, 8)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ /* All fields must match except length and checksum. */
+ NAPI_GRO_CB(p)->flush |=
+ memcmp(&iph->frag_off, &iph2->frag_off, 4) ||
+ (u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) != id;
+
+ NAPI_GRO_CB(p)->flush |= flush;
+ }
+
+ NAPI_GRO_CB(skb)->flush |= flush;
+ __skb_pull(skb, sizeof(*iph));
+ skb_reset_transport_header(skb);
+
+ pp = ops->gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+static int inet_gro_complete(struct sk_buff *skb)
+{
+ struct net_protocol *ops;
+ struct iphdr *iph = ip_hdr(skb);
+ int proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ int err = -ENOSYS;
+ __be16 newlen = htons(skb->len - skb_network_offset(skb));
+
+ csum_replace2(&iph->check, iph->tot_len, newlen);
+ iph->tot_len = newlen;
+
+ rcu_read_lock();
+ ops = rcu_dereference(inet_protos[proto]);
+ if (WARN_ON(!ops || !ops->gro_complete))
+ goto out_unlock;
+
+ err = ops->gro_complete(skb);
+
+out_unlock:
+ rcu_read_unlock();
+
+ return err;
+}
+
int inet_ctl_sock_create(struct sock **sk, unsigned short family,
unsigned short type, unsigned char protocol,
struct net *net)
@@ -1311,6 +1402,7 @@ EXPORT_SYMBOL_GPL(snmp_mib_free);
#ifdef CONFIG_IP_MULTICAST
static struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
+ .netns_ok = 1,
};
#endif
@@ -1319,6 +1411,8 @@ static struct net_protocol tcp_protocol = {
.err_handler = tcp_v4_err,
.gso_send_check = tcp_v4_gso_send_check,
.gso_segment = tcp_tso_segment,
+ .gro_receive = tcp4_gro_receive,
+ .gro_complete = tcp4_gro_complete,
.no_policy = 1,
.netns_ok = 1,
};
@@ -1411,6 +1505,8 @@ static struct packet_type ip_packet_type = {
.func = ip_rcv,
.gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment,
+ .gro_receive = inet_gro_receive,
+ .gro_complete = inet_gro_complete,
};
static int __init inet_init(void)
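Two details of the new inet_gro_receive() above are easy to miss. First, saddr and daddr are adjacent 32-bit fields in struct iphdr, so the single memcmp(&iph->saddr, &iph2->saddr, 8) compares both addresses at once. Second, the id test flags a flush unless the new packet's IP ID equals the held packet's ID plus the number of segments already merged, i.e. IDs must increase by exactly one per segment. A standalone sketch of the adjacency trick, using a hypothetical mini_iphdr rather than the kernel struct:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the tail of struct iphdr: saddr and daddr are adjacent,
     * so one 8-byte memcmp covers both addresses at once. */
    struct mini_iphdr {
            uint32_t saddr;
            uint32_t daddr;
    };

    static int same_flow_addrs(const struct mini_iphdr *a,
                               const struct mini_iphdr *b)
    {
            return memcmp(&a->saddr, &b->saddr, 8) == 0;
    }

    int main(void)
    {
            struct mini_iphdr a = { 0x0a000001, 0x0a000002 };
            struct mini_iphdr b = a;
            printf("%d\n", same_flow_addrs(&a, &b));        /* 1: same flow */
            b.daddr++;
            printf("%d\n", same_flow_addrs(&a, &b));        /* 0: daddr differs */
            return 0;
    }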
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 8219b7e0968..e878e494296 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -201,15 +201,16 @@ out:
static void ah4_err(struct sk_buff *skb, u32 info)
{
- struct iphdr *iph = (struct iphdr*)skb->data;
- struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+(iph->ihl<<2));
+ struct net *net = dev_net(skb->dev);
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return;
- x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
+ x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
if (!x)
return;
printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
@@ -293,9 +294,7 @@ static void ah_destroy(struct xfrm_state *x)
return;
kfree(ahp->work_icv);
- ahp->work_icv = NULL;
crypto_free_hash(ahp->tfm);
- ahp->tfm = NULL;
kfree(ahp);
}
@@ -316,6 +315,7 @@ static struct net_protocol ah4_protocol = {
.handler = xfrm4_rcv,
.err_handler = ah4_err,
.no_policy = 1,
+ .netns_ok = 1,
};
static int __init ah4_init(void)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 1a9dd66511f..29a74c01d8d 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -506,7 +506,7 @@ int arp_bind_neighbour(struct dst_entry *dst)
if (dev == NULL)
return -EINVAL;
if (n == NULL) {
- __be32 nexthop = ((struct rtable*)dst)->rt_gateway;
+ __be32 nexthop = ((struct rtable *)dst)->rt_gateway;
if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT))
nexthop = 0;
n = __neigh_lookup_errno(
@@ -640,14 +640,14 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
arp_ptr=(unsigned char *)(arp+1);
memcpy(arp_ptr, src_hw, dev->addr_len);
- arp_ptr+=dev->addr_len;
- memcpy(arp_ptr, &src_ip,4);
- arp_ptr+=4;
+ arp_ptr += dev->addr_len;
+ memcpy(arp_ptr, &src_ip, 4);
+ arp_ptr += 4;
if (target_hw != NULL)
memcpy(arp_ptr, target_hw, dev->addr_len);
else
memset(arp_ptr, 0, dev->addr_len);
- arp_ptr+=dev->addr_len;
+ arp_ptr += dev->addr_len;
memcpy(arp_ptr, &dest_ip, 4);
return skb;
@@ -818,18 +818,18 @@ static int arp_process(struct sk_buff *skb)
addr_type = rt->rt_type;
if (addr_type == RTN_LOCAL) {
- n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
- if (n) {
- int dont_send = 0;
-
- if (!dont_send)
- dont_send |= arp_ignore(in_dev,sip,tip);
- if (!dont_send && IN_DEV_ARPFILTER(in_dev))
- dont_send |= arp_filter(sip,tip,dev);
- if (!dont_send)
- arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
+ int dont_send = 0;
- neigh_release(n);
+ if (!dont_send)
+ dont_send |= arp_ignore(in_dev,sip,tip);
+ if (!dont_send && IN_DEV_ARPFILTER(in_dev))
+ dont_send |= arp_filter(sip,tip,dev);
+ if (!dont_send) {
+ n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
+ if (n) {
+ arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
+ neigh_release(n);
+ }
}
goto out;
} else if (IN_DEV_FORWARD(in_dev)) {
@@ -1308,7 +1308,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
}
#endif
- sprintf(tbuf, NIPQUAD_FMT, NIPQUAD(*(u32*)n->primary_key));
+ sprintf(tbuf, "%pI4", n->primary_key);
seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
read_unlock(&n->lock);
@@ -1321,7 +1321,7 @@ static void arp_format_pneigh_entry(struct seq_file *seq,
int hatype = dev ? dev->type : 0;
char tbuf[16];
- sprintf(tbuf, NIPQUAD_FMT, NIPQUAD(*(u32*)n->key));
+ sprintf(tbuf, "%pI4", n->key);
seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
tbuf, hatype, ATF_PUBL | ATF_PERM, "00:00:00:00:00:00",
dev ? dev->name : "*");
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2e78f6bd977..e52799047a5 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -490,7 +490,6 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def)
}
atomic_set(&doi_def->refcount, 1);
- INIT_RCU_HEAD(&doi_def->rcu);
spin_lock(&cipso_v4_doi_list_lock);
if (cipso_v4_doi_search(doi_def->doi) != NULL)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 56fce3ab6c5..309997edc8a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -112,13 +112,7 @@ static inline void devinet_sysctl_unregister(struct in_device *idev)
static struct in_ifaddr *inet_alloc_ifa(void)
{
- struct in_ifaddr *ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
-
- if (ifa) {
- INIT_RCU_HEAD(&ifa->rcu_head);
- }
-
- return ifa;
+ return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
}
static void inet_rcu_free_ifa(struct rcu_head *head)
@@ -161,7 +155,6 @@ static struct in_device *inetdev_init(struct net_device *dev)
in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
if (!in_dev)
goto out;
- INIT_RCU_HEAD(&in_dev->rcu_head);
memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
sizeof(in_dev->cnf));
in_dev->cnf.sysctl = NULL;
@@ -1108,7 +1101,7 @@ out:
}
static struct notifier_block ip_netdev_notifier = {
- .notifier_call =inetdev_event,
+ .notifier_call = inetdev_event,
};
static inline size_t inet_nlmsg_size(void)
@@ -1195,7 +1188,7 @@ done:
return skb->len;
}
-static void rtmsg_ifa(int event, struct in_ifaddr* ifa, struct nlmsghdr *nlh,
+static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
u32 pid)
{
struct sk_buff *skb;
@@ -1262,7 +1255,7 @@ static void inet_forward_change(struct net *net)
}
static int devinet_conf_proc(ctl_table *ctl, int write,
- struct file* filp, void __user *buffer,
+ struct file *filp, void __user *buffer,
size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
@@ -1334,7 +1327,7 @@ static int devinet_conf_sysctl(ctl_table *table,
}
static int devinet_sysctl_forward(ctl_table *ctl, int write,
- struct file* filp, void __user *buffer,
+ struct file *filp, void __user *buffer,
size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
@@ -1363,7 +1356,7 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
}
int ipv4_doint_and_flush(ctl_table *ctl, int write,
- struct file* filp, void __user *buffer,
+ struct file *filp, void __user *buffer,
size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 21515d4c49e..18bb383ea39 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -413,15 +413,16 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
static void esp4_err(struct sk_buff *skb, u32 info)
{
- struct iphdr *iph = (struct iphdr*)skb->data;
- struct ip_esp_hdr *esph = (struct ip_esp_hdr*)(skb->data+(iph->ihl<<2));
+ struct net *net = dev_net(skb->dev);
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return;
- x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
+ x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
if (!x)
return;
NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
@@ -618,6 +619,7 @@ static struct net_protocol esp4_protocol = {
.handler = xfrm4_rcv,
.err_handler = esp4_err,
.no_policy = 1,
+ .netns_ok = 1,
};
static int __init esp4_init(void)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 65c1503f8cc..741e4fa3e47 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -578,7 +578,7 @@ errout:
return err;
}
-static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct fib_config cfg;
@@ -600,7 +600,7 @@ errout:
return err;
}
-static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct fib_config cfg;
@@ -903,7 +903,7 @@ static void fib_disable_ip(struct net_device *dev, int force)
static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
- struct in_ifaddr *ifa = (struct in_ifaddr*)ptr;
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct net_device *dev = ifa->ifa_dev->dev;
switch (event) {
@@ -964,11 +964,11 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
}
static struct notifier_block fib_inetaddr_notifier = {
- .notifier_call =fib_inetaddr_event,
+ .notifier_call = fib_inetaddr_event,
};
static struct notifier_block fib_netdev_notifier = {
- .notifier_call =fib_netdev_event,
+ .notifier_call = fib_netdev_event,
};
static int __net_init ip_fib_net_init(struct net *net)
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index c8cac6c7f88..ded8c44fb84 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -247,7 +247,7 @@ fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
{
int err;
struct fn_zone *fz;
- struct fn_hash *t = (struct fn_hash*)tb->tb_data;
+ struct fn_hash *t = (struct fn_hash *)tb->tb_data;
read_lock(&fib_hash_lock);
for (fz = t->fn_zone_list; fz; fz = fz->fz_next) {
@@ -283,7 +283,7 @@ fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib
struct fib_node *f;
struct fib_info *fi = NULL;
struct fib_info *last_resort;
- struct fn_hash *t = (struct fn_hash*)tb->tb_data;
+ struct fn_hash *t = (struct fn_hash *)tb->tb_data;
struct fn_zone *fz = t->fn_zones[0];
if (fz == NULL)
@@ -548,7 +548,7 @@ out:
static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg)
{
- struct fn_hash *table = (struct fn_hash*)tb->tb_data;
+ struct fn_hash *table = (struct fn_hash *)tb->tb_data;
struct fib_node *f;
struct fib_alias *fa, *fa_to_delete;
struct fn_zone *fz;
@@ -748,7 +748,7 @@ static int fn_hash_dump(struct fib_table *tb, struct sk_buff *skb, struct netlin
{
int m, s_m;
struct fn_zone *fz;
- struct fn_hash *table = (struct fn_hash*)tb->tb_data;
+ struct fn_hash *table = (struct fn_hash *)tb->tb_data;
s_m = cb->args[2];
read_lock(&fib_hash_lock);
@@ -845,10 +845,10 @@ static struct fib_alias *fib_get_first(struct seq_file *seq)
struct hlist_node *node;
struct fib_node *fn;
- hlist_for_each_entry(fn,node,iter->hash_head,fn_hash) {
+ hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
struct fib_alias *fa;
- list_for_each_entry(fa,&fn->fn_alias,fa_list) {
+ list_for_each_entry(fa, &fn->fn_alias, fa_list) {
iter->fn = fn;
iter->fa = fa;
goto out;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ded2ae34eab..4817dea3bc7 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -63,16 +63,16 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \
-for (nhsel=0, nh = (struct fib_nh*)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)
+for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)
#else /* CONFIG_IP_ROUTE_MULTIPATH */
/* Hope, that gcc will optimize it to get rid of dummy loop */
-#define for_nexthops(fi) { int nhsel=0; const struct fib_nh * nh = (fi)->fib_nh; \
+#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \
for (nhsel=0; nhsel < 1; nhsel++)
-#define change_nexthops(fi) { int nhsel=0; struct fib_nh * nh = (struct fib_nh*)((fi)->fib_nh); \
+#define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \
for (nhsel=0; nhsel < 1; nhsel++)
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -358,7 +358,7 @@ int fib_detect_death(struct fib_info *fi, int order,
state = n->nud_state;
neigh_release(n);
}
- if (state==NUD_REACHABLE)
+ if (state == NUD_REACHABLE)
return 0;
if ((state&NUD_VALID) && order != dflt)
return 0;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5cb72786a8a..ec0ae490f0b 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2399,8 +2399,8 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
__be32 prf = htonl(mask_pfx(tn->key, tn->pos));
seq_indent(seq, iter->depth-1);
- seq_printf(seq, " +-- " NIPQUAD_FMT "/%d %d %d %d\n",
- NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
+ seq_printf(seq, " +-- %pI4/%d %d %d %d\n",
+ &prf, tn->pos, tn->bits, tn->full_children,
tn->empty_children);
} else {
@@ -2410,7 +2410,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
__be32 val = htonl(l->key);
seq_indent(seq, iter->depth);
- seq_printf(seq, " |-- " NIPQUAD_FMT "\n", NIPQUAD(val));
+ seq_printf(seq, " |-- %pI4\n", &val);
hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
struct fib_alias *fa;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 72b2de76f1c..705b33b184a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -321,12 +321,12 @@ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
}
static void icmp_push_reply(struct icmp_bxm *icmp_param,
- struct ipcm_cookie *ipc, struct rtable *rt)
+ struct ipcm_cookie *ipc, struct rtable **rt)
{
struct sock *sk;
struct sk_buff *skb;
- sk = icmp_sk(dev_net(rt->u.dst.dev));
+ sk = icmp_sk(dev_net((*rt)->u.dst.dev));
if (ip_append_data(sk, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len,
icmp_param->head_len,
@@ -392,7 +392,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
}
if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
icmp_param->data.icmph.code))
- icmp_push_reply(icmp_param, &ipc, rt);
+ icmp_push_reply(icmp_param, &ipc, &rt);
ip_rt_put(rt);
out_unlock:
icmp_xmit_unlock(sk);
@@ -562,7 +562,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
/* No need to clone since we're just using its address. */
rt2 = rt;
- err = xfrm_lookup((struct dst_entry **)&rt, &fl, NULL, 0);
+ err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
switch (err) {
case 0:
if (rt != rt2)
@@ -601,7 +601,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
if (err)
goto relookup_failed;
- err = xfrm_lookup((struct dst_entry **)&rt2, &fl, NULL,
+ err = xfrm_lookup(net, (struct dst_entry **)&rt2, &fl, NULL,
XFRM_LOOKUP_ICMP);
switch (err) {
case 0:
@@ -635,7 +635,7 @@ route_done:
icmp_param.data_len = room;
icmp_param.head_len = sizeof(struct icmphdr);
- icmp_push_reply(&icmp_param, &ipc, rt);
+ icmp_push_reply(&icmp_param, &ipc, &rt);
ende:
ip_rt_put(rt);
out_unlock:
@@ -683,10 +683,8 @@ static void icmp_unreach(struct sk_buff *skb)
break;
case ICMP_FRAG_NEEDED:
if (ipv4_config.no_pmtu_disc) {
- LIMIT_NETDEBUG(KERN_INFO "ICMP: " NIPQUAD_FMT ": "
- "fragmentation needed "
- "and DF set.\n",
- NIPQUAD(iph->daddr));
+ LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n",
+ &iph->daddr);
} else {
info = ip_rt_frag_needed(net, iph,
ntohs(icmph->un.frag.mtu),
@@ -696,9 +694,8 @@ static void icmp_unreach(struct sk_buff *skb)
}
break;
case ICMP_SR_FAILED:
- LIMIT_NETDEBUG(KERN_INFO "ICMP: " NIPQUAD_FMT ": Source "
- "Route Failed.\n",
- NIPQUAD(iph->daddr));
+ LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n",
+ &iph->daddr);
break;
default:
break;
@@ -729,12 +726,12 @@ static void icmp_unreach(struct sk_buff *skb)
if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
if (net_ratelimit())
- printk(KERN_WARNING NIPQUAD_FMT " sent an invalid ICMP "
+ printk(KERN_WARNING "%pI4 sent an invalid ICMP "
"type %u, code %u "
- "error to a broadcast: " NIPQUAD_FMT " on %s\n",
- NIPQUAD(ip_hdr(skb)->saddr),
+ "error to a broadcast: %pI4 on %s\n",
+ &ip_hdr(skb)->saddr,
icmph->type, icmph->code,
- NIPQUAD(iph->daddr),
+ &iph->daddr,
skb->dev->name);
goto out;
}
@@ -952,9 +949,8 @@ static void icmp_address_reply(struct sk_buff *skb)
break;
}
if (!ifa && net_ratelimit()) {
- printk(KERN_INFO "Wrong address mask " NIPQUAD_FMT " from "
- "%s/" NIPQUAD_FMT "\n",
- NIPQUAD(*mp), dev->name, NIPQUAD(rt->rt_src));
+ printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n",
+ mp, dev->name, &rt->rt_src);
}
}
rcu_read_unlock();
@@ -976,9 +972,10 @@ int icmp_rcv(struct sk_buff *skb)
struct net *net = dev_net(rt->u.dst.dev);
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+ struct sec_path *sp = skb_sec_path(skb);
int nh;
- if (!(skb->sp && skb->sp->xvec[skb->sp->len - 1]->props.flags &
+ if (!(sp && sp->xvec[sp->len - 1]->props.flags &
XFRM_STATE_ICMP))
goto drop;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a0d86455c53..9eb6219af61 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -167,7 +167,7 @@ static __inline__ void igmp_stop_timer(struct ip_mc_list *im)
spin_lock_bh(&im->lock);
if (del_timer(&im->timer))
atomic_dec(&im->refcnt);
- im->tm_running=0;
+ im->tm_running = 0;
im->reporter = 0;
im->unsolicit_count = 0;
spin_unlock_bh(&im->lock);
@@ -176,9 +176,9 @@ static __inline__ void igmp_stop_timer(struct ip_mc_list *im)
/* It must be called with locked im->lock */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
- int tv=net_random() % max_delay;
+ int tv = net_random() % max_delay;
- im->tm_running=1;
+ im->tm_running = 1;
if (!mod_timer(&im->timer, jiffies+tv+2))
atomic_inc(&im->refcnt);
}
@@ -207,7 +207,7 @@ static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
if (del_timer(&im->timer)) {
if ((long)(im->timer.expires-jiffies) < max_delay) {
add_timer(&im->timer);
- im->tm_running=1;
+ im->tm_running = 1;
spin_unlock_bh(&im->lock);
return;
}
@@ -358,7 +358,7 @@ static int igmpv3_sendpack(struct sk_buff *skb)
static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
{
- return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc,type,gdel,sdel);
+ return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
}
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
@@ -653,7 +653,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
return -1;
}
- skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL) {
ip_rt_put(rt);
return -1;
@@ -682,11 +682,11 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
((u8*)&iph[1])[3] = 0;
ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
- ih->type=type;
- ih->code=0;
- ih->csum=0;
- ih->group=group;
- ih->csum=ip_compute_csum((void *)ih, sizeof(struct igmphdr));
+ ih->type = type;
+ ih->code = 0;
+ ih->csum = 0;
+ ih->group = group;
+ ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
return ip_local_out(skb);
}
@@ -728,7 +728,7 @@ static void igmp_timer_expire(unsigned long data)
struct in_device *in_dev = im->interface;
spin_lock(&im->lock);
- im->tm_running=0;
+ im->tm_running = 0;
if (im->unsolicit_count) {
im->unsolicit_count--;
@@ -997,7 +997,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
--ANK
*/
if (arp_mc_map(addr, buf, dev, 0) == 0)
- dev_mc_add(dev,buf,dev->addr_len,0);
+ dev_mc_add(dev, buf, dev->addr_len, 0);
}
/*
@@ -1010,7 +1010,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
struct net_device *dev = in_dev->dev;
if (arp_mc_map(addr, buf, dev, 0) == 0)
- dev_mc_delete(dev,buf,dev->addr_len,0);
+ dev_mc_delete(dev, buf, dev->addr_len, 0);
}
#ifdef CONFIG_IP_MULTICAST
@@ -1210,10 +1210,10 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
if (!im)
goto out;
- im->users=1;
- im->interface=in_dev;
+ im->users = 1;
+ im->interface = in_dev;
in_dev_hold(in_dev);
- im->multiaddr=addr;
+ im->multiaddr = addr;
/* initial mode is (EX, empty) */
im->sfmode = MCAST_EXCLUDE;
im->sfcount[MCAST_INCLUDE] = 0;
@@ -1224,7 +1224,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
atomic_set(&im->refcnt, 1);
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
- im->tm_running=0;
+ im->tm_running = 0;
setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
im->unsolicit_count = IGMP_Unsolicited_Report_Count;
im->reporter = 0;
@@ -1232,8 +1232,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
#endif
im->loaded = 0;
write_lock_bh(&in_dev->mc_list_lock);
- im->next=in_dev->mc_list;
- in_dev->mc_list=im;
+ im->next = in_dev->mc_list;
+ in_dev->mc_list = im;
in_dev->mc_count++;
write_unlock_bh(&in_dev->mc_list_lock);
#ifdef CONFIG_IP_MULTICAST
@@ -1279,7 +1279,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
ASSERT_RTNL();
for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
- if (i->multiaddr==addr) {
+ if (i->multiaddr == addr) {
if (--i->users == 0) {
write_lock_bh(&in_dev->mc_list_lock);
*ip = i->next;
@@ -1738,7 +1738,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
{
int err;
__be32 addr = imr->imr_multiaddr.s_addr;
- struct ip_mc_socklist *iml=NULL, *i;
+ struct ip_mc_socklist *iml = NULL, *i;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
@@ -1769,7 +1769,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
err = -ENOBUFS;
if (count >= sysctl_igmp_max_memberships)
goto done;
- iml = sock_kmalloc(sk,sizeof(*iml),GFP_KERNEL);
+ iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
if (iml == NULL)
goto done;
@@ -2275,6 +2275,7 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
#if defined(CONFIG_PROC_FS)
struct igmp_mc_iter_state {
+ struct seq_net_private p;
struct net_device *dev;
struct in_device *in_dev;
};
@@ -2283,11 +2284,12 @@ struct igmp_mc_iter_state {
static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
+ struct net *net = seq_file_net(seq);
struct ip_mc_list *im = NULL;
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
state->in_dev = NULL;
- for_each_netdev(&init_net, state->dev) {
+ for_each_netdev(net, state->dev) {
struct in_device *in_dev;
in_dev = in_dev_get(state->dev);
if (!in_dev)
@@ -2408,7 +2410,7 @@ static const struct seq_operations igmp_mc_seq_ops = {
static int igmp_mc_seq_open(struct inode *inode, struct file *file)
{
- return seq_open_private(file, &igmp_mc_seq_ops,
+ return seq_open_net(inode, file, &igmp_mc_seq_ops,
sizeof(struct igmp_mc_iter_state));
}
@@ -2417,10 +2419,11 @@ static const struct file_operations igmp_mc_seq_fops = {
.open = igmp_mc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = seq_release_net,
};
struct igmp_mcf_iter_state {
+ struct seq_net_private p;
struct net_device *dev;
struct in_device *idev;
struct ip_mc_list *im;
@@ -2430,13 +2433,14 @@ struct igmp_mcf_iter_state {
static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
+ struct net *net = seq_file_net(seq);
struct ip_sf_list *psf = NULL;
struct ip_mc_list *im = NULL;
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
state->idev = NULL;
state->im = NULL;
- for_each_netdev(&init_net, state->dev) {
+ for_each_netdev(net, state->dev) {
struct in_device *idev;
idev = in_dev_get(state->dev);
if (unlikely(idev == NULL))
@@ -2567,7 +2571,7 @@ static const struct seq_operations igmp_mcf_seq_ops = {
static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
{
- return seq_open_private(file, &igmp_mcf_seq_ops,
+ return seq_open_net(inode, file, &igmp_mcf_seq_ops,
sizeof(struct igmp_mcf_iter_state));
}
@@ -2576,14 +2580,41 @@ static const struct file_operations igmp_mcf_seq_fops = {
.open = igmp_mcf_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = seq_release_net,
};
-int __init igmp_mc_proc_init(void)
+static int igmp_net_init(struct net *net)
{
- proc_net_fops_create(&init_net, "igmp", S_IRUGO, &igmp_mc_seq_fops);
- proc_net_fops_create(&init_net, "mcfilter", S_IRUGO, &igmp_mcf_seq_fops);
+ struct proc_dir_entry *pde;
+
+ pde = proc_net_fops_create(net, "igmp", S_IRUGO, &igmp_mc_seq_fops);
+ if (!pde)
+ goto out_igmp;
+ pde = proc_net_fops_create(net, "mcfilter", S_IRUGO, &igmp_mcf_seq_fops);
+ if (!pde)
+ goto out_mcfilter;
return 0;
+
+out_mcfilter:
+ proc_net_remove(net, "igmp");
+out_igmp:
+ return -ENOMEM;
+}
+
+static void igmp_net_exit(struct net *net)
+{
+ proc_net_remove(net, "mcfilter");
+ proc_net_remove(net, "igmp");
+}
+
+static struct pernet_operations igmp_net_ops = {
+ .init = igmp_net_init,
+ .exit = igmp_net_exit,
+};
+
+int __init igmp_mc_proc_init(void)
+{
+ return register_pernet_subsys(&igmp_net_ops);
}
#endif
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index bd1278a2d82..c7cda1ca8e6 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -109,7 +109,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
hashinfo->bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
- if (tb->ib_net == net && tb->port == rover)
+ if (ib_net(tb) == net && tb->port == rover)
goto next;
break;
next:
@@ -137,7 +137,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
hashinfo->bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
- if (tb->ib_net == net && tb->port == snum)
+ if (ib_net(tb) == net && tb->port == snum)
goto tb_found;
}
tb = NULL;
@@ -323,7 +323,7 @@ void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
-struct dst_entry* inet_csk_route_req(struct sock *sk,
+struct dst_entry *inet_csk_route_req(struct sock *sk,
const struct request_sock *req)
{
struct rtable *rt;
@@ -344,16 +344,17 @@ struct dst_entry* inet_csk_route_req(struct sock *sk,
struct net *net = sock_net(sk);
security_req_classify_flow(req, &fl);
- if (ip_route_output_flow(net, &rt, &fl, sk, 0)) {
- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
- return NULL;
- }
- if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
- ip_rt_put(rt);
- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
- return NULL;
- }
+ if (ip_route_output_flow(net, &rt, &fl, sk, 0))
+ goto no_route;
+ if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
+ goto route_err;
return &rt->u.dst;
+
+route_err:
+ ip_rt_put(rt);
+no_route:
+ IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+ return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
@@ -561,7 +562,7 @@ void inet_csk_destroy_sock(struct sock *sk)
sk_refcnt_debug_release(sk);
- atomic_dec(sk->sk_prot->orphan_count);
+ percpu_counter_dec(sk->sk_prot->orphan_count);
sock_put(sk);
}
@@ -632,6 +633,8 @@ void inet_csk_listen_stop(struct sock *sk)
acc_req = req->dl_next;
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+
local_bh_disable();
bh_lock_sock(child);
WARN_ON(sock_owned_by_user(child));
@@ -641,8 +644,6 @@ void inet_csk_listen_stop(struct sock *sk)
sock_orphan(child);
- atomic_inc(sk->sk_prot->orphan_count);
-
inet_csk_destroy_sock(child);
bh_unlock_sock(child);
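The orphan_count change above swaps a shared atomic_t for a percpu_counter. A hedged note on the trade-off (kernel context; the inc/dec calls are the ones in the diff): updates land in per-cpu batches, so the hot connection-teardown path no longer bounces one cache line across all CPUs, while readers pay the cost of summing the batches:

    percpu_counter_inc(sk->sk_prot->orphan_count);          /* cheap, per-cpu */
    percpu_counter_dec(sk->sk_prot->orphan_count);
    /* approximate sum over all cpus, clamped at zero */
    long orphans = percpu_counter_read_positive(sk->sk_prot->orphan_count);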
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 564230dabcb..588a7796e3e 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -718,13 +718,15 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
goto skip_listen_ht;
- inet_listen_lock(hashinfo);
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
struct sock *sk;
- struct hlist_node *node;
+ struct hlist_nulls_node *node;
+ struct inet_listen_hashbucket *ilb;
num = 0;
- sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
+ ilb = &hashinfo->listening_hash[i];
+ spin_lock_bh(&ilb->lock);
+ sk_nulls_for_each(sk, node, &ilb->head) {
struct inet_sock *inet = inet_sk(sk);
if (num < s_num) {
@@ -742,7 +744,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
goto syn_recv;
if (inet_csk_diag_dump(sk, skb, cb) < 0) {
- inet_listen_unlock(hashinfo);
+ spin_unlock_bh(&ilb->lock);
goto done;
}
@@ -751,7 +753,7 @@ syn_recv:
goto next_listen;
if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
- inet_listen_unlock(hashinfo);
+ spin_unlock_bh(&ilb->lock);
goto done;
}
@@ -760,12 +762,12 @@ next_listen:
cb->args[4] = 0;
++num;
}
+ spin_unlock_bh(&ilb->lock);
s_num = 0;
cb->args[3] = 0;
cb->args[4] = 0;
}
- inet_listen_unlock(hashinfo);
skip_listen_ht:
cb->args[0] = 1;
s_i = num = s_num = 0;
@@ -776,20 +778,21 @@ skip_listen_ht:
for (i = s_i; i < hashinfo->ehash_size; i++) {
struct inet_ehash_bucket *head = &hashinfo->ehash[i];
- rwlock_t *lock = inet_ehash_lockp(hashinfo, i);
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
struct sock *sk;
- struct hlist_node *node;
+ struct hlist_nulls_node *node;
num = 0;
- if (hlist_empty(&head->chain) && hlist_empty(&head->twchain))
+ if (hlist_nulls_empty(&head->chain) &&
+ hlist_nulls_empty(&head->twchain))
continue;
if (i > s_i)
s_num = 0;
- read_lock_bh(lock);
- sk_for_each(sk, node, &head->chain) {
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &head->chain) {
struct inet_sock *inet = inet_sk(sk);
if (num < s_num)
@@ -803,7 +806,7 @@ skip_listen_ht:
r->id.idiag_dport)
goto next_normal;
if (inet_csk_diag_dump(sk, skb, cb) < 0) {
- read_unlock_bh(lock);
+ spin_unlock_bh(lock);
goto done;
}
next_normal:
@@ -825,14 +828,14 @@ next_normal:
r->id.idiag_dport)
goto next_dying;
if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
- read_unlock_bh(lock);
+ spin_unlock_bh(lock);
goto done;
}
next_dying:
++num;
}
}
- read_unlock_bh(lock);
+ spin_unlock_bh(lock);
}
done:
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 44981906fb9..6a1045da48d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -35,7 +35,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
if (tb != NULL) {
- tb->ib_net = hold_net(net);
+ write_pnet(&tb->ib_net, hold_net(net));
tb->port = snum;
tb->fastreuse = 0;
INIT_HLIST_HEAD(&tb->owners);
@@ -51,7 +51,7 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
{
if (hlist_empty(&tb->owners)) {
__hlist_del(&tb->node);
- release_net(tb->ib_net);
+ release_net(ib_net(tb));
kmem_cache_free(cachep, tb);
}
}
@@ -110,33 +110,29 @@ void __inet_inherit_port(struct sock *sk, struct sock *child)
EXPORT_SYMBOL_GPL(__inet_inherit_port);
-/*
- * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
- * Look, when several writers sleep and reader wakes them up, all but one
- * immediately hit write lock and grab all the cpus. Exclusive sleep solves
- * this, _but_ remember, it adds useless work on UP machines (wake up each
- * exclusive lock release). It should be ifdefed really.
- */
-void inet_listen_wlock(struct inet_hashinfo *hashinfo)
- __acquires(hashinfo->lhash_lock)
+static inline int compute_score(struct sock *sk, struct net *net,
+ const unsigned short hnum, const __be32 daddr,
+ const int dif)
{
- write_lock(&hashinfo->lhash_lock);
-
- if (atomic_read(&hashinfo->lhash_users)) {
- DEFINE_WAIT(wait);
+ int score = -1;
+ struct inet_sock *inet = inet_sk(sk);
- for (;;) {
- prepare_to_wait_exclusive(&hashinfo->lhash_wait,
- &wait, TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&hashinfo->lhash_users))
- break;
- write_unlock_bh(&hashinfo->lhash_lock);
- schedule();
- write_lock_bh(&hashinfo->lhash_lock);
+ if (net_eq(sock_net(sk), net) && inet->num == hnum &&
+ !ipv6_only_sock(sk)) {
+ __be32 rcv_saddr = inet->rcv_saddr;
+ score = sk->sk_family == PF_INET ? 1 : 0;
+ if (rcv_saddr) {
+ if (rcv_saddr != daddr)
+ return -1;
+ score += 2;
+ }
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
+ return -1;
+ score += 2;
}
-
- finish_wait(&hashinfo->lhash_wait, &wait);
}
+ return score;
}
/*
@@ -145,72 +141,48 @@ void inet_listen_wlock(struct inet_hashinfo *hashinfo)
* remote address for the connection. So always assume those are both
* wildcarded during the search since they can never be otherwise.
*/
-static struct sock *inet_lookup_listener_slow(struct net *net,
- const struct hlist_head *head,
- const __be32 daddr,
- const unsigned short hnum,
- const int dif)
-{
- struct sock *result = NULL, *sk;
- const struct hlist_node *node;
- int hiscore = -1;
-
- sk_for_each(sk, node, head) {
- const struct inet_sock *inet = inet_sk(sk);
-
- if (net_eq(sock_net(sk), net) && inet->num == hnum &&
- !ipv6_only_sock(sk)) {
- const __be32 rcv_saddr = inet->rcv_saddr;
- int score = sk->sk_family == PF_INET ? 1 : 0;
-
- if (rcv_saddr) {
- if (rcv_saddr != daddr)
- continue;
- score += 2;
- }
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- continue;
- score += 2;
- }
- if (score == 5)
- return sk;
- if (score > hiscore) {
- hiscore = score;
- result = sk;
- }
- }
- }
- return result;
-}
-/* Optimize the common listener case. */
+
struct sock *__inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 daddr, const unsigned short hnum,
const int dif)
{
- struct sock *sk = NULL;
- const struct hlist_head *head;
-
- read_lock(&hashinfo->lhash_lock);
- head = &hashinfo->listening_hash[inet_lhashfn(net, hnum)];
- if (!hlist_empty(head)) {
- const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
-
- if (inet->num == hnum && !sk->sk_node.next &&
- (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
- (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
- !sk->sk_bound_dev_if && net_eq(sock_net(sk), net))
- goto sherry_cache;
- sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
+ struct sock *sk, *result;
+ struct hlist_nulls_node *node;
+ unsigned int hash = inet_lhashfn(net, hnum);
+ struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
+ int score, hiscore;
+
+ rcu_read_lock();
+begin:
+ result = NULL;
+ hiscore = -1;
+ sk_nulls_for_each_rcu(sk, node, &ilb->head) {
+ score = compute_score(sk, net, hnum, daddr, dif);
+ if (score > hiscore) {
+ result = sk;
+ hiscore = score;
+ }
}
- if (sk) {
-sherry_cache:
- sock_hold(sk);
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
+ goto begin;
+ if (result) {
+ if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+ result = NULL;
+ else if (unlikely(compute_score(result, net, hnum, daddr,
+ dif) < hiscore)) {
+ sock_put(result);
+ goto begin;
+ }
}
- read_unlock(&hashinfo->lhash_lock);
- return sk;
+ rcu_read_unlock();
+ return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
@@ -223,35 +195,65 @@ struct sock * __inet_lookup_established(struct net *net,
INET_ADDR_COOKIE(acookie, saddr, daddr)
const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
struct sock *sk;
- const struct hlist_node *node;
+ const struct hlist_nulls_node *node;
/* Optimize here for direct hit, only listening connections can
* have wildcards anyways.
*/
unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
- struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
- rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
+ unsigned int slot = hash & (hashinfo->ehash_size - 1);
+ struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
- prefetch(head->chain.first);
- read_lock(lock);
- sk_for_each(sk, node, &head->chain) {
+ rcu_read_lock();
+begin:
+ sk_nulls_for_each_rcu(sk, node, &head->chain) {
if (INET_MATCH(sk, net, hash, acookie,
- saddr, daddr, ports, dif))
- goto hit; /* You sunk my battleship! */
+ saddr, daddr, ports, dif)) {
+ if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
+ goto begintw;
+ if (unlikely(!INET_MATCH(sk, net, hash, acookie,
+ saddr, daddr, ports, dif))) {
+ sock_put(sk);
+ goto begin;
+ }
+ goto out;
+ }
}
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (get_nulls_value(node) != slot)
+ goto begin;
+begintw:
/* Must check for a TIME_WAIT'er before going to listener hash. */
- sk_for_each(sk, node, &head->twchain) {
+ sk_nulls_for_each_rcu(sk, node, &head->twchain) {
if (INET_TW_MATCH(sk, net, hash, acookie,
- saddr, daddr, ports, dif))
- goto hit;
+ saddr, daddr, ports, dif)) {
+ if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
+ sk = NULL;
+ goto out;
+ }
+ if (unlikely(!INET_TW_MATCH(sk, net, hash, acookie,
+ saddr, daddr, ports, dif))) {
+ sock_put(sk);
+ goto begintw;
+ }
+ goto out;
+ }
}
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (get_nulls_value(node) != slot)
+ goto begintw;
sk = NULL;
out:
- read_unlock(lock);
+ rcu_read_unlock();
return sk;
-hit:
- sock_hold(sk);
- goto out;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
@@ -270,16 +272,15 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
struct net *net = sock_net(sk);
unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->dport);
struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
- rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
+ spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
struct sock *sk2;
- const struct hlist_node *node;
+ const struct hlist_nulls_node *node;
struct inet_timewait_sock *tw;
- prefetch(head->chain.first);
- write_lock(lock);
+ spin_lock(lock);
/* Check TIME-WAIT sockets first. */
- sk_for_each(sk2, node, &head->twchain) {
+ sk_nulls_for_each(sk2, node, &head->twchain) {
tw = inet_twsk(sk2);
if (INET_TW_MATCH(sk2, net, hash, acookie,
@@ -293,7 +294,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
tw = NULL;
/* And established part... */
- sk_for_each(sk2, node, &head->chain) {
+ sk_nulls_for_each(sk2, node, &head->chain) {
if (INET_MATCH(sk2, net, hash, acookie,
saddr, daddr, ports, dif))
goto not_unique;
@@ -306,9 +307,9 @@ unique:
inet->sport = htons(lport);
sk->sk_hash = hash;
WARN_ON(!sk_unhashed(sk));
- __sk_add_node(sk, &head->chain);
+ __sk_nulls_add_node_rcu(sk, &head->chain);
+ spin_unlock(lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- write_unlock(lock);
if (twp) {
*twp = tw;
@@ -324,7 +325,7 @@ unique:
return 0;
not_unique:
- write_unlock(lock);
+ spin_unlock(lock);
return -EADDRNOTAVAIL;
}
@@ -338,8 +339,8 @@ static inline u32 inet_sk_port_offset(const struct sock *sk)
void __inet_hash_nolisten(struct sock *sk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
- struct hlist_head *list;
- rwlock_t *lock;
+ struct hlist_nulls_head *list;
+ spinlock_t *lock;
struct inet_ehash_bucket *head;
WARN_ON(!sk_unhashed(sk));
@@ -349,18 +350,17 @@ void __inet_hash_nolisten(struct sock *sk)
list = &head->chain;
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
- write_lock(lock);
- __sk_add_node(sk, list);
+ spin_lock(lock);
+ __sk_nulls_add_node_rcu(sk, list);
+ spin_unlock(lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
static void __inet_hash(struct sock *sk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
- struct hlist_head *list;
- rwlock_t *lock;
+ struct inet_listen_hashbucket *ilb;
if (sk->sk_state != TCP_LISTEN) {
__inet_hash_nolisten(sk);
@@ -368,14 +368,12 @@ static void __inet_hash(struct sock *sk)
}
WARN_ON(!sk_unhashed(sk));
- list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
- lock = &hashinfo->lhash_lock;
+ ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
- inet_listen_wlock(hashinfo);
- __sk_add_node(sk, list);
+ spin_lock(&ilb->lock);
+ __sk_nulls_add_node_rcu(sk, &ilb->head);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- write_unlock(lock);
- wake_up(&hashinfo->lhash_wait);
+ spin_unlock(&ilb->lock);
}
void inet_hash(struct sock *sk)
@@ -390,27 +388,23 @@ EXPORT_SYMBOL_GPL(inet_hash);
void inet_unhash(struct sock *sk)
{
- rwlock_t *lock;
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+ spinlock_t *lock;
+ int done;
if (sk_unhashed(sk))
- goto out;
+ return;
- if (sk->sk_state == TCP_LISTEN) {
- local_bh_disable();
- inet_listen_wlock(hashinfo);
- lock = &hashinfo->lhash_lock;
- } else {
+ if (sk->sk_state == TCP_LISTEN)
+ lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
+ else
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
- write_lock_bh(lock);
- }
- if (__sk_del_node_init(sk))
+ spin_lock_bh(lock);
+ done =__sk_nulls_del_node_init_rcu(sk);
+ if (done)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- write_unlock_bh(lock);
-out:
- if (sk->sk_state == TCP_LISTEN)
- wake_up(&hashinfo->lhash_wait);
+ spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);
@@ -449,7 +443,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
* unique enough.
*/
inet_bind_bucket_for_each(tb, node, &head->chain) {
- if (tb->ib_net == net && tb->port == port) {
+ if (ib_net(tb) == net && tb->port == port) {
WARN_ON(hlist_empty(&tb->owners));
if (tb->fastreuse >= 0)
goto next_port;
@@ -524,3 +518,16 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
}
EXPORT_SYMBOL_GPL(inet_hash_connect);
+
+void inet_hashinfo_init(struct inet_hashinfo *h)
+{
+ int i;
+
+ for (i = 0; i < INET_LHTABLE_SIZE; i++) {
+ spin_lock_init(&h->listening_hash[i].lock);
+ INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
+ i + LISTENING_NULLS_BASE);
+ }
+}
+
+EXPORT_SYMBOL_GPL(inet_hashinfo_init);
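The lockless lookups introduced above rely on the hlist_nulls invariant: each chain is terminated by a 'nulls' value encoding the bucket it belongs to (the slot number for the ehash chains, hash + LISTENING_NULLS_BASE for the listen table, as set up by inet_hashinfo_init()). A condensed sketch of the reader-side protocol shared by both lookup functions, with match() standing in for the real INET_MATCH/compute_score tests:

    rcu_read_lock();
    begin:
    sk_nulls_for_each_rcu(sk, node, &head->chain) {
            if (match(sk)) {
                    /* entry may be freed and reused under us: take a
                     * reference only if still live, then re-validate */
                    if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
                            goto begin;
                    if (unlikely(!match(sk))) {
                            sock_put(sk);
                            goto begin;
                    }
                    goto found;
            }
    }
    /* ended on another chain's nulls marker: the node we followed was
     * recycled into a different bucket, so the walk must restart */
    if (get_nulls_value(node) != slot)
            goto begin;
    sk = NULL;
    found:
    rcu_read_unlock();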
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index cfd034a2b96..6a667dae315 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -120,7 +120,7 @@ static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);
tcph->check = 0;
- tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), 0);
+ tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), 0);
lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
lro_desc->ip_tot_len -
@@ -135,7 +135,7 @@ static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
__wsum tcp_ps_hdr_csum;
tcp_csum = ~csum_unfold(tcph->check);
- tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), tcp_csum);
+ tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), tcp_csum);
tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
len + TCP_HDR_LEN(tcph),
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 1c5fd38f882..8554d0ea171 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -20,16 +20,16 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
struct inet_bind_hashbucket *bhead;
struct inet_bind_bucket *tb;
/* Unlink from established hashes. */
- rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
- write_lock(lock);
- if (hlist_unhashed(&tw->tw_node)) {
- write_unlock(lock);
+ spin_lock(lock);
+ if (hlist_nulls_unhashed(&tw->tw_node)) {
+ spin_unlock(lock);
return;
}
- __hlist_del(&tw->tw_node);
- sk_node_init(&tw->tw_node);
- write_unlock(lock);
+ hlist_nulls_del_rcu(&tw->tw_node);
+ sk_nulls_node_init(&tw->tw_node);
+ spin_unlock(lock);
/* Disassociate with bind bucket. */
bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
@@ -76,7 +76,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
const struct inet_sock *inet = inet_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
- rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
struct inet_bind_hashbucket *bhead;
/* Step 1: Put TW into bind hash. Original socket stays there too.
Note, that any socket with inet->num != 0 MUST be bound in
@@ -90,17 +90,21 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
spin_unlock(&bhead->lock);
- write_lock(lock);
+ spin_lock(lock);
- /* Step 2: Remove SK from established hash. */
- if (__sk_del_node_init(sk))
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-
- /* Step 3: Hash TW into TIMEWAIT chain. */
- inet_twsk_add_node(tw, &ehead->twchain);
+ /*
+ * Step 2: Hash TW into TIMEWAIT chain.
+ * Should be done before removing sk from established chain
+ * because readers are lockless and search established first.
+ */
atomic_inc(&tw->tw_refcnt);
+ inet_twsk_add_node_rcu(tw, &ehead->twchain);
- write_unlock(lock);
+ /* Step 3: Remove SK from established hash. */
+ if (__sk_nulls_del_node_init_rcu(sk))
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+
+ spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
@@ -416,17 +420,17 @@ void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
{
struct inet_timewait_sock *tw;
struct sock *sk;
- struct hlist_node *node;
+ struct hlist_nulls_node *node;
int h;
local_bh_disable();
for (h = 0; h < (hashinfo->ehash_size); h++) {
struct inet_ehash_bucket *head =
inet_ehash_bucket(hashinfo, h);
- rwlock_t *lock = inet_ehash_lockp(hashinfo, h);
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, h);
restart:
- write_lock(lock);
- sk_for_each(sk, node, &head->twchain) {
+ spin_lock(lock);
+ sk_nulls_for_each(sk, node, &head->twchain) {
tw = inet_twsk(sk);
if (!net_eq(twsk_net(tw), net) ||
@@ -434,13 +438,13 @@ restart:
continue;
atomic_inc(&tw->tw_refcnt);
- write_unlock(lock);
+ spin_unlock(lock);
inet_twsk_deschedule(tw, twdr);
inet_twsk_put(tw);
goto restart;
}
- write_unlock(lock);
+ spin_unlock(lock);
}
local_bh_enable();
}
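The reordering in __inet_twsk_hashdance() above is forced by the lockless readers added in inet_hashtables.c: __inet_lookup_established() walks the established chain first and only falls back to the twchain, so the timewait entry must become visible before the full socket vanishes. Roughly:

    /* old order: del sk from ehash, then add tw to twchain
     *   a lockless reader between the two steps finds the connection in
     *   neither chain and wrongly reports a miss
     * new order: add tw to twchain, then del sk from ehash
     *   a reader always sees at least one of the two entries
     */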
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index a456ceeac3f..b1fbe18feb5 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -144,7 +144,7 @@ static void unlink_from_unused(struct inet_peer *p)
* _stack is known to be NULL or not at compile time,
* so compiler will optimize the if (_stack) tests.
*/
-#define lookup(_daddr,_stack) \
+#define lookup(_daddr, _stack) \
({ \
struct inet_peer *u, **v; \
if (_stack != NULL) { \
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 450016b89a1..df3fe50bbf0 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -106,7 +106,7 @@ int ip_forward(struct sk_buff *skb)
* We now generate an ICMP HOST REDIRECT giving the route
* we calculated.
*/
- if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb->sp)
+ if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
ip_rt_send_redirect(skb);
skb->priority = rt_tos2priority(iph->tos);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e4f81f54bef..6659ac000ee 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -56,7 +56,7 @@ struct ipfrag_skb_cb
int offset;
};
-#define FRAG_CB(skb) ((struct ipfrag_skb_cb*)((skb)->cb))
+#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
@@ -559,9 +559,8 @@ out_nomem:
goto out_fail;
out_oversize:
if (net_ratelimit())
- printk(KERN_INFO
- "Oversized IP packet from " NIPQUAD_FMT ".\n",
- NIPQUAD(qp->saddr));
+ printk(KERN_INFO "Oversized IP packet from %pI4.\n",
+ &qp->saddr);
out_fail:
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS);
return err;
@@ -608,7 +607,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
.data = &init_net.ipv4.frags.high_thresh,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_IPFRAG_LOW_THRESH,
@@ -616,7 +615,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
.data = &init_net.ipv4.frags.low_thresh,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_IPFRAG_TIME,
@@ -624,8 +623,8 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
.data = &init_net.ipv4.frags.timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies
},
{ }
};
@@ -637,15 +636,15 @@ static struct ctl_table ip4_frags_ctl_table[] = {
.data = &ip4_frags.secret_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies
},
{
.procname = "ipfrag_max_dist",
.data = &sysctl_ipfrag_max_dist,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero
},
{ }
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 85c487b8572..0101521f366 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -126,8 +126,6 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev);
/* Fallback tunnel: no source, no destination, no key, no options */
-static int ipgre_fb_tunnel_init(struct net_device *dev);
-
#define HASH_SIZE 16
static int ipgre_net_id;
@@ -371,7 +369,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
by themself???
*/
- struct iphdr *iph = (struct iphdr*)skb->data;
+ struct iphdr *iph = (struct iphdr *)skb->data;
__be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
int grehlen = (iph->ihl<<2) + 4;
const int type = icmp_hdr(skb)->type;
@@ -632,7 +630,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
gre_hlen = 0;
- tiph = (struct iphdr*)skb->data;
+ tiph = (struct iphdr *)skb->data;
} else {
gre_hlen = tunnel->hlen;
tiph = &tunnel->parms.iph;
@@ -660,7 +658,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (neigh == NULL)
goto tx_error;
- addr6 = (struct in6_addr*)&neigh->primary_key;
+ addr6 = (struct in6_addr *)&neigh->primary_key;
addr_type = ipv6_addr_type(addr6);
if (addr_type == IPV6_ADDR_ANY) {
@@ -726,7 +724,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
}
#ifdef CONFIG_IPV6
else if (skb->protocol == htons(ETH_P_IPV6)) {
- struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
+ struct rt6_info *rt6 = (struct rt6_info *)skb->dst;
if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
if ((tunnel->parms.iph.daddr &&
@@ -800,7 +798,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
iph->ttl = old_iph->ttl;
#ifdef CONFIG_IPV6
else if (skb->protocol == htons(ETH_P_IPV6))
- iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
+ iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
#endif
else
iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
@@ -962,7 +960,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
} else {
- unsigned nflags=0;
+ unsigned nflags = 0;
t = netdev_priv(dev);
@@ -1104,7 +1102,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
- struct iphdr *iph = (struct iphdr*) skb_mac_header(skb);
+ struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
memcpy(haddr, &iph->saddr, 4);
return 4;
}
@@ -1142,6 +1140,7 @@ static int ipgre_open(struct net_device *dev)
static int ipgre_close(struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
+
if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
struct in_device *in_dev;
in_dev = inetdev_by_index(dev_net(dev), t->mlink);
@@ -1155,14 +1154,22 @@ static int ipgre_close(struct net_device *dev)
#endif
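+/* The callbacks formerly assigned one by one in ipgre_tunnel_setup()
+ * are consolidated into a single const ops table.
+ */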
+static const struct net_device_ops ipgre_netdev_ops = {
+ .ndo_init = ipgre_tunnel_init,
+ .ndo_uninit = ipgre_tunnel_uninit,
+#ifdef CONFIG_NET_IPGRE_BROADCAST
+ .ndo_open = ipgre_open,
+ .ndo_stop = ipgre_close,
+#endif
+ .ndo_start_xmit = ipgre_tunnel_xmit,
+ .ndo_do_ioctl = ipgre_tunnel_ioctl,
+ .ndo_change_mtu = ipgre_tunnel_change_mtu,
+};
+
static void ipgre_tunnel_setup(struct net_device *dev)
{
- dev->init = ipgre_tunnel_init;
- dev->uninit = ipgre_tunnel_uninit;
+ dev->netdev_ops = &ipgre_netdev_ops;
dev->destructor = free_netdev;
- dev->hard_start_xmit = ipgre_tunnel_xmit;
- dev->do_ioctl = ipgre_tunnel_ioctl;
- dev->change_mtu = ipgre_tunnel_change_mtu;
dev->type = ARPHRD_IPGRE;
dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
@@ -1194,8 +1201,6 @@ static int ipgre_tunnel_init(struct net_device *dev)
return -EINVAL;
dev->flags = IFF_BROADCAST;
dev->header_ops = &ipgre_header_ops;
- dev->open = ipgre_open;
- dev->stop = ipgre_close;
}
#endif
} else
@@ -1204,7 +1209,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
return 0;
}
-static int ipgre_fb_tunnel_init(struct net_device *dev)
+static void ipgre_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
@@ -1220,7 +1225,6 @@ static int ipgre_fb_tunnel_init(struct net_device *dev)
dev_hold(dev);
ign->tunnels_wc[0] = tunnel;
- return 0;
}
@@ -1264,9 +1268,9 @@ static int ipgre_init_net(struct net *net)
err = -ENOMEM;
goto err_alloc_dev;
}
-
- ign->fb_tunnel_dev->init = ipgre_fb_tunnel_init;
dev_net_set(ign->fb_tunnel_dev, net);
+
+ ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
if ((err = register_netdev(ign->fb_tunnel_dev)))
@@ -1397,16 +1401,22 @@ static int ipgre_tap_init(struct net_device *dev)
return 0;
}
+static const struct net_device_ops ipgre_tap_netdev_ops = {
+ .ndo_init = ipgre_tap_init,
+ .ndo_uninit = ipgre_tunnel_uninit,
+ .ndo_start_xmit = ipgre_tunnel_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = ipgre_tunnel_change_mtu,
+};
+
static void ipgre_tap_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->init = ipgre_tap_init;
- dev->uninit = ipgre_tunnel_uninit;
+ dev->netdev_ops = &ipgre_tap_netdev_ops;
dev->destructor = free_netdev;
- dev->hard_start_xmit = ipgre_tunnel_xmit;
- dev->change_mtu = ipgre_tunnel_change_mtu;
dev->iflink = 0;
dev->features |= NETIF_F_NETNS_LOCAL;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index cfb38ac9d69..1a58a6fa1dc 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -302,10 +302,8 @@ static inline int ip_rcv_options(struct sk_buff *skb)
if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
if (IN_DEV_LOG_MARTIANS(in_dev) &&
net_ratelimit())
- printk(KERN_INFO "source route option "
- NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
- NIPQUAD(iph->saddr),
- NIPQUAD(iph->daddr));
+ printk(KERN_INFO "source route option %pI4 -> %pI4\n",
+ &iph->saddr, &iph->daddr);
in_dev_put(in_dev);
goto drop;
}
@@ -350,9 +348,9 @@ static int ip_rcv_finish(struct sk_buff *skb)
struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id());
u32 idx = skb->dst->tclassid;
st[idx&0xFF].o_packets++;
- st[idx&0xFF].o_bytes+=skb->len;
+ st[idx&0xFF].o_bytes += skb->len;
st[(idx>>16)&0xFF].i_packets++;
- st[(idx>>16)&0xFF].i_bytes+=skb->len;
+ st[(idx>>16)&0xFF].i_bytes += skb->len;
}
#endif
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d2a8f8bb78a..8ebe86dd72a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -430,7 +430,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
* single device frame, and queue such a frame for sending.
*/
-int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
+int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct iphdr *iph;
int raw = 0;
@@ -720,7 +720,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
- int transhdrlen, int mtu,unsigned int flags)
+ int transhdrlen, int mtu, unsigned int flags)
{
struct sk_buff *skb;
int err;
@@ -741,7 +741,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
skb_reserve(skb, hh_len);
/* create space for UDP/IP header */
- skb_put(skb,fragheaderlen + transhdrlen);
+ skb_put(skb, fragheaderlen + transhdrlen);
/* initialize network header pointer */
skb_reset_network_header(skb);
@@ -778,7 +778,7 @@ int ip_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
- struct ipcm_cookie *ipc, struct rtable *rt,
+ struct ipcm_cookie *ipc, struct rtable **rtp,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
@@ -793,6 +793,7 @@ int ip_append_data(struct sock *sk,
int offset = 0;
unsigned int maxfraglen, fragheaderlen;
int csummode = CHECKSUM_NONE;
+ struct rtable *rt;
if (flags&MSG_PROBE)
return 0;
@@ -812,7 +813,11 @@ int ip_append_data(struct sock *sk,
inet->cork.flags |= IPCORK_OPT;
inet->cork.addr = ipc->addr;
}
- dst_hold(&rt->u.dst);
+ rt = *rtp;
+ /*
+ * We steal the reference to this route; the caller must not release it
+ */
+ *rtp = NULL;
inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
rt->u.dst.dev->mtu :
dst_mtu(rt->u.dst.path);
@@ -1279,7 +1284,12 @@ int ip_push_pending_frames(struct sock *sk)
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- skb->dst = dst_clone(&rt->u.dst);
+ /*
+ * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
+ * on dst refcount
+ */
+ inet->cork.dst = NULL;
+ skb->dst = &rt->u.dst;
if (iph->protocol == IPPROTO_ICMP)
icmp_out_count(net, ((struct icmphdr *)
@@ -1391,7 +1401,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
- &ipc, rt, MSG_DONTWAIT);
+ &ipc, &rt, MSG_DONTWAIT);
if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
if (arg->csumoffset >= 0)
*((__sum16 *)skb_transport_header(skb) +
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 465abf0a986..43c05854d75 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -48,6 +48,7 @@
#define IP_CMSG_RECVOPTS 8
#define IP_CMSG_RETOPTS 16
#define IP_CMSG_PASSSEC 32
+#define IP_CMSG_ORIGDSTADDR 64
/*
* SOL_IP control messages.
@@ -94,7 +95,7 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
- struct ip_options * opt = (struct ip_options*)optbuf;
+ struct ip_options *opt = (struct ip_options *)optbuf;
if (IPCB(skb)->opt.optlen == 0)
return;
@@ -126,6 +127,27 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
security_release_secctx(secdata, seclen);
}
+static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
+{
+ struct sockaddr_in sin;
+ struct iphdr *iph = ip_hdr(skb);
+ __be16 *ports = (__be16 *)skb_transport_header(skb);
+
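+ /* ensure the 4 bytes carrying the source and destination ports
+  * are actually present in the buffer
+  */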
+ if (skb_transport_offset(skb) + 4 > skb->len)
+ return;
+
+ /* All current transport protocols have the port numbers in the
+ * first four bytes of the transport header and this function is
+ * written with this assumption in mind.
+ */
+
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = iph->daddr;
+ sin.sin_port = ports[1];
+ memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
+
+ put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
+}
void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
@@ -160,6 +182,12 @@ void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
if (flags & 1)
ip_cmsg_recv_security(msg, skb);
+
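+ /* cmsg_flags is walked one bit per option: shift right and stop
+  * once no higher bits remain set
+  */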
+ if ((flags>>=1) == 0)
+ return;
+ if (flags & 1)
+ ip_cmsg_recv_dstaddr(msg, skb);
+
}
int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
@@ -411,7 +439,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, int optlen)
{
struct inet_sock *inet = inet_sk(sk);
- int val=0,err;
+ int val = 0, err;
if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
(1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
@@ -421,7 +449,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
(1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) ||
optname == IP_MULTICAST_TTL ||
- optname == IP_MULTICAST_LOOP) {
+ optname == IP_MULTICAST_LOOP ||
+ optname == IP_RECVORIGDSTADDR) {
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
@@ -437,7 +466,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
/* If optlen==0, it is equivalent to val == 0 */
if (ip_mroute_opt(optname))
- return ip_mroute_setsockopt(sk,optname,optval,optlen);
+ return ip_mroute_setsockopt(sk, optname, optval, optlen);
err = 0;
lock_sock(sk);
@@ -509,6 +538,12 @@ static int do_ip_setsockopt(struct sock *sk, int level,
else
inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
break;
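+ /* IP_RECVORIGDSTADDR asks for the original destination address
+  * and port to be delivered as an IP_ORIGDSTADDR cmsg
+  */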
+ case IP_RECVORIGDSTADDR:
+ if (val)
+ inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
+ else
+ inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
+ break;
case IP_TOS: /* This sets both TOS and Precedence */
if (sk->sk_type == SOCK_STREAM) {
val &= ~3;
@@ -549,7 +584,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
goto e_inval;
if (optlen<1)
goto e_inval;
- if (val==-1)
+ if (val == -1)
val = 1;
if (val < 0 || val > 255)
goto e_inval;
@@ -573,12 +608,12 @@ static int do_ip_setsockopt(struct sock *sk, int level,
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
- if (copy_from_user(&mreq,optval,sizeof(mreq)))
+ if (copy_from_user(&mreq, optval, sizeof(mreq)))
break;
} else {
memset(&mreq, 0, sizeof(mreq));
if (optlen >= sizeof(struct in_addr) &&
- copy_from_user(&mreq.imr_address,optval,sizeof(struct in_addr)))
+ copy_from_user(&mreq.imr_address, optval, sizeof(struct in_addr)))
break;
}
@@ -626,11 +661,11 @@ static int do_ip_setsockopt(struct sock *sk, int level,
goto e_inval;
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
- if (copy_from_user(&mreq,optval,sizeof(mreq)))
+ if (copy_from_user(&mreq, optval, sizeof(mreq)))
break;
} else {
memset(&mreq, 0, sizeof(mreq));
- if (copy_from_user(&mreq,optval,sizeof(struct ip_mreq)))
+ if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
break;
}
@@ -808,7 +843,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
err = -ENOBUFS;
break;
}
- gsf = kmalloc(optlen,GFP_KERNEL);
+ gsf = kmalloc(optlen, GFP_KERNEL);
if (!gsf) {
err = -ENOBUFS;
break;
@@ -828,7 +863,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
goto mc_msf_out;
}
msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
- msf = kmalloc(msize,GFP_KERNEL);
+ msf = kmalloc(msize, GFP_KERNEL);
if (!msf) {
err = -ENOBUFS;
goto mc_msf_out;
@@ -971,9 +1006,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
return -EOPNOTSUPP;
if (ip_mroute_opt(optname))
- return ip_mroute_getsockopt(sk,optname,optval,optlen);
+ return ip_mroute_getsockopt(sk, optname, optval, optlen);
- if (get_user(len,optlen))
+ if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
@@ -984,7 +1019,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_OPTIONS:
{
unsigned char optbuf[sizeof(struct ip_options)+40];
- struct ip_options * opt = (struct ip_options*)optbuf;
+ struct ip_options *opt = (struct ip_options *)optbuf;
opt->optlen = 0;
if (inet->opt)
memcpy(optbuf, inet->opt,
@@ -1022,6 +1057,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_PASSSEC:
val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
break;
+ case IP_RECVORIGDSTADDR:
+ val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
+ break;
case IP_TOS:
val = inet->tos;
break;
@@ -1154,13 +1192,13 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
len = 1;
if (put_user(len, optlen))
return -EFAULT;
- if (copy_to_user(optval,&ucval,1))
+ if (copy_to_user(optval, &ucval, 1))
return -EFAULT;
} else {
len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
- if (copy_to_user(optval,&val,len))
+ if (copy_to_user(optval, &val, len))
return -EFAULT;
}
return 0;
@@ -1178,7 +1216,7 @@ int ip_getsockopt(struct sock *sk, int level,
!ip_mroute_opt(optname)) {
int len;
- if (get_user(len,optlen))
+ if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 38ccb6dfb02..3262ce06294 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -35,12 +35,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
return;
spi = htonl(ntohs(ipch->cpi));
- x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
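+ /* the xfrm state API is per-namespace now; ipcomp is not yet
+  * namespace-aware, so it stays pinned to init_net
+  */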
+ x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr,
spi, IPPROTO_COMP, AF_INET);
if (!x)
return;
- NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/" NIPQUAD_FMT "\n",
- spi, NIPQUAD(iph->daddr));
+ NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI4\n",
+ spi, &iph->daddr);
xfrm_state_put(x);
}
@@ -49,7 +49,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
{
struct xfrm_state *t;
- t = xfrm_state_alloc();
+ t = xfrm_state_alloc(&init_net);
if (t == NULL)
goto out;
@@ -85,7 +85,7 @@ static int ipcomp_tunnel_attach(struct xfrm_state *x)
int err = 0;
struct xfrm_state *t;
- t = xfrm_state_lookup((xfrm_address_t *)&x->id.daddr.a4,
+ t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr.a4,
x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
if (!t) {
t = ipcomp_tunnel_create(x);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 42065fff46c..42a0f3dd3fd 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -374,7 +374,7 @@ static int __init ic_defaults(void)
*/
if (!ic_host_name_set)
- sprintf(init_utsname()->nodename, NIPQUAD_FMT, NIPQUAD(ic_myaddr));
+ sprintf(init_utsname()->nodename, "%pI4", &ic_myaddr);
if (root_server_addr == NONE)
root_server_addr = ic_servaddr;
@@ -387,11 +387,11 @@ static int __init ic_defaults(void)
else if (IN_CLASSC(ntohl(ic_myaddr)))
ic_netmask = htonl(IN_CLASSC_NET);
else {
- printk(KERN_ERR "IP-Config: Unable to guess netmask for address " NIPQUAD_FMT "\n",
- NIPQUAD(ic_myaddr));
+ printk(KERN_ERR "IP-Config: Unable to guess netmask for address %pI4\n",
+ &ic_myaddr);
return -1;
}
- printk("IP-Config: Guessing netmask " NIPQUAD_FMT "\n", NIPQUAD(ic_netmask));
+ printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask);
}
return 0;
@@ -979,10 +979,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
ic_myaddr = b->your_ip;
ic_servaddr = server_id;
#ifdef IPCONFIG_DEBUG
- printk("DHCP: Offered address " NIPQUAD_FMT,
- NIPQUAD(ic_myaddr));
- printk(" by server " NIPQUAD_FMT "\n",
- NIPQUAD(ic_servaddr));
+ printk("DHCP: Offered address %pI4 by server %pI4\n",
+ &ic_myaddr, &ic_servaddr);
#endif
/* The DHCP indicated server address takes
* precedence over the bootp header one if
@@ -1177,11 +1175,11 @@ static int __init ic_dynamic(void)
return -1;
}
- printk("IP-Config: Got %s answer from " NIPQUAD_FMT ", ",
+ printk("IP-Config: Got %s answer from %pI4, ",
((ic_got_reply & IC_RARP) ? "RARP"
: (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
- NIPQUAD(ic_servaddr));
- printk("my address is " NIPQUAD_FMT "\n", NIPQUAD(ic_myaddr));
+ &ic_servaddr);
+ printk("my address is %pI4\n", &ic_myaddr);
return 0;
}
@@ -1206,14 +1204,12 @@ static int pnp_seq_show(struct seq_file *seq, void *v)
"domain %s\n", ic_domain);
for (i = 0; i < CONF_NAMESERVERS_MAX; i++) {
if (ic_nameservers[i] != NONE)
- seq_printf(seq,
- "nameserver " NIPQUAD_FMT "\n",
- NIPQUAD(ic_nameservers[i]));
+ seq_printf(seq, "nameserver %pI4\n",
+ &ic_nameservers[i]);
}
if (ic_servaddr != NONE)
- seq_printf(seq,
- "bootserver " NIPQUAD_FMT "\n",
- NIPQUAD(ic_servaddr));
+ seq_printf(seq, "bootserver %pI4\n",
+ &ic_servaddr);
return 0;
}
@@ -1387,13 +1383,13 @@ static int __init ip_auto_config(void)
*/
printk("IP-Config: Complete:");
printk("\n device=%s", ic_dev->name);
- printk(", addr=" NIPQUAD_FMT, NIPQUAD(ic_myaddr));
- printk(", mask=" NIPQUAD_FMT, NIPQUAD(ic_netmask));
- printk(", gw=" NIPQUAD_FMT, NIPQUAD(ic_gateway));
+ printk(", addr=%pI4", &ic_myaddr);
+ printk(", mask=%pI4", &ic_netmask);
+ printk(", gw=%pI4", &ic_gateway);
printk(",\n host=%s, domain=%s, nis-domain=%s",
utsname()->nodename, ic_domain, utsname()->domainname);
- printk(",\n bootserver=" NIPQUAD_FMT, NIPQUAD(ic_servaddr));
- printk(", rootserver=" NIPQUAD_FMT, NIPQUAD(root_server_addr));
+ printk(",\n bootserver=%pI4", &ic_servaddr);
+ printk(", rootserver=%pI4", &root_server_addr);
printk(", rootpath=%s", root_server_path);
printk("\n");
#endif /* !SILENT */
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 29609d29df7..5079dfbc6f3 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -130,8 +130,8 @@ struct ipip_net {
struct net_device *fb_tunnel_dev;
};
-static int ipip_fb_tunnel_init(struct net_device *dev);
-static int ipip_tunnel_init(struct net_device *dev);
+static void ipip_fb_tunnel_init(struct net_device *dev);
+static void ipip_tunnel_init(struct net_device *dev);
static void ipip_tunnel_setup(struct net_device *dev);
static DEFINE_RWLOCK(ipip_lock);
@@ -245,9 +245,10 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
}
nt = netdev_priv(dev);
- dev->init = ipip_tunnel_init;
nt->parms = *parms;
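+ /* ipip_netdev_ops carries no ndo_init hook, so initialize the
+  * tunnel explicitly before registration
+  */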
+ ipip_tunnel_init(dev);
+
if (register_netdevice(dev) < 0)
goto failed_free;
@@ -281,7 +282,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
8 bytes of packet payload. It means, that precise relaying of
ICMP in the real Internet is absolutely infeasible.
*/
- struct iphdr *iph = (struct iphdr*)skb->data;
+ struct iphdr *iph = (struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct ip_tunnel *t;
@@ -691,12 +692,17 @@ static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static const struct net_device_ops ipip_netdev_ops = {
+ .ndo_uninit = ipip_tunnel_uninit,
+ .ndo_start_xmit = ipip_tunnel_xmit,
+ .ndo_do_ioctl = ipip_tunnel_ioctl,
+ .ndo_change_mtu = ipip_tunnel_change_mtu,
+};
+
static void ipip_tunnel_setup(struct net_device *dev)
{
- dev->uninit = ipip_tunnel_uninit;
- dev->hard_start_xmit = ipip_tunnel_xmit;
- dev->do_ioctl = ipip_tunnel_ioctl;
- dev->change_mtu = ipip_tunnel_change_mtu;
+ dev->netdev_ops = &ipip_netdev_ops;
dev->destructor = free_netdev;
dev->type = ARPHRD_TUNNEL;
@@ -708,11 +714,9 @@ static void ipip_tunnel_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
}
-static int ipip_tunnel_init(struct net_device *dev)
+static void ipip_tunnel_init(struct net_device *dev)
{
- struct ip_tunnel *tunnel;
-
- tunnel = netdev_priv(dev);
+ struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev;
strcpy(tunnel->parms.name, dev->name);
@@ -721,11 +725,9 @@ static int ipip_tunnel_init(struct net_device *dev)
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
ipip_tunnel_bind_dev(dev);
-
- return 0;
}
-static int ipip_fb_tunnel_init(struct net_device *dev)
+static void ipip_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
@@ -740,7 +742,6 @@ static int ipip_fb_tunnel_init(struct net_device *dev)
dev_hold(dev);
ipn->tunnels_wc[0] = tunnel;
- return 0;
}
static struct xfrm_tunnel ipip_handler = {
@@ -792,10 +793,10 @@ static int ipip_init_net(struct net *net)
err = -ENOMEM;
goto err_alloc_dev;
}
-
- ipn->fb_tunnel_dev->init = ipip_fb_tunnel_init;
dev_net_set(ipn->fb_tunnel_dev, net);
+ ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
+
if ((err = register_netdev(ipn->fb_tunnel_dev)))
goto err_reg_dev;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 25924b1eb2e..14666449dc1 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -124,8 +124,8 @@ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
dev = __dev_get_by_name(&init_net, "tunl0");
if (dev) {
+ const struct net_device_ops *ops = dev->netdev_ops;
struct ifreq ifr;
- mm_segment_t oldfs;
struct ip_tunnel_parm p;
memset(&p, 0, sizeof(p));
@@ -137,9 +137,13 @@ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
sprintf(p.name, "dvmrp%d", v->vifc_vifi);
ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
- oldfs = get_fs(); set_fs(KERNEL_DS);
- dev->do_ioctl(dev, &ifr, SIOCDELTUNNEL);
- set_fs(oldfs);
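+ /* the ioctl handler expects a user pointer, so lift the address
+  * limit while passing in this kernel buffer
+  */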
+ if (ops->ndo_do_ioctl) {
+ mm_segment_t oldfs = get_fs();
+
+ set_fs(KERNEL_DS);
+ ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
+ set_fs(oldfs);
+ }
}
}
@@ -151,9 +155,9 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v)
dev = __dev_get_by_name(&init_net, "tunl0");
if (dev) {
+ const struct net_device_ops *ops = dev->netdev_ops;
int err;
struct ifreq ifr;
- mm_segment_t oldfs;
struct ip_tunnel_parm p;
struct in_device *in_dev;
@@ -166,9 +170,14 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v)
sprintf(p.name, "dvmrp%d", v->vifc_vifi);
ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
- oldfs = get_fs(); set_fs(KERNEL_DS);
- err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
- set_fs(oldfs);
+ if (ops->ndo_do_ioctl) {
+ mm_segment_t oldfs = get_fs();
+
+ set_fs(KERNEL_DS);
+ err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
+ set_fs(oldfs);
+ } else
+ err = -EOPNOTSUPP;
dev = NULL;
@@ -213,12 +222,16 @@ static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
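+/* the PIM register VIF only ever transmits, so a lone ndo_start_xmit suffices */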
+static const struct net_device_ops reg_vif_netdev_ops = {
+ .ndo_start_xmit = reg_vif_xmit,
+};
+
static void reg_vif_setup(struct net_device *dev)
{
dev->type = ARPHRD_PIMREG;
dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
dev->flags = IFF_NOARP;
- dev->hard_start_xmit = reg_vif_xmit;
+ dev->netdev_ops = &reg_vif_netdev_ops;
dev->destructor = free_netdev;
}
@@ -331,7 +344,7 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
atomic_dec(&cache_resolve_queue_len);
- while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
+ while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
nlh->nlmsg_type = NLMSG_ERROR;
@@ -477,13 +490,13 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
/*
* Fill in the VIF structures
*/
- v->rate_limit=vifc->vifc_rate_limit;
- v->local=vifc->vifc_lcl_addr.s_addr;
- v->remote=vifc->vifc_rmt_addr.s_addr;
- v->flags=vifc->vifc_flags;
+ v->rate_limit = vifc->vifc_rate_limit;
+ v->local = vifc->vifc_lcl_addr.s_addr;
+ v->remote = vifc->vifc_rmt_addr.s_addr;
+ v->flags = vifc->vifc_flags;
if (!mrtsock)
v->flags |= VIFF_STATIC;
- v->threshold=vifc->vifc_threshold;
+ v->threshold = vifc->vifc_threshold;
v->bytes_in = 0;
v->bytes_out = 0;
v->pkt_in = 0;
@@ -494,7 +507,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
- v->dev=dev;
+ v->dev = dev;
#ifdef CONFIG_IP_PIMSM
if (v->flags&VIFF_REGISTER)
reg_vif_num = vifi;
@@ -507,7 +520,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
{
- int line=MFC_HASH(mcastgrp,origin);
+ int line = MFC_HASH(mcastgrp, origin);
struct mfc_cache *c;
for (c=mfc_cache_array[line]; c; c = c->next) {
@@ -522,8 +535,8 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
*/
static struct mfc_cache *ipmr_cache_alloc(void)
{
- struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
- if (c==NULL)
+ struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
+ if (c == NULL)
return NULL;
c->mfc_un.res.minvif = MAXVIFS;
return c;
@@ -531,8 +544,8 @@ static struct mfc_cache *ipmr_cache_alloc(void)
static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
- struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
- if (c==NULL)
+ struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
+ if (c == NULL)
return NULL;
skb_queue_head_init(&c->mfc_un.unres.unresolved);
c->mfc_un.unres.expires = jiffies + 10*HZ;
@@ -552,7 +565,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
* Play the pending entries through our router
*/
- while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
+ while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
@@ -637,7 +650,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
* Add our header
*/
- igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr));
+ igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
igmp->type =
msg->im_msgtype = assert;
igmp->code = 0;
@@ -653,7 +666,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
/*
* Deliver to mrouted
*/
- if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) {
+ if ((ret = sock_queue_rcv_skb(mroute_socket, skb)) < 0) {
if (net_ratelimit())
printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
kfree_skb(skb);
@@ -685,7 +698,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
* Create a new entry if allowable
*/
- if (atomic_read(&cache_resolve_queue_len)>=10 ||
+ if (atomic_read(&cache_resolve_queue_len) >= 10 ||
(c=ipmr_cache_alloc_unres())==NULL) {
spin_unlock_bh(&mfc_unres_lock);
@@ -728,7 +741,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
kfree_skb(skb);
err = -ENOBUFS;
} else {
- skb_queue_tail(&c->mfc_un.unres.unresolved,skb);
+ skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
err = 0;
}
@@ -745,7 +758,7 @@ static int ipmr_mfc_delete(struct mfcctl *mfc)
int line;
struct mfc_cache *c, **cp;
- line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
+ line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
@@ -766,7 +779,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
int line;
struct mfc_cache *uc, *c, **cp;
- line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
+ line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
@@ -787,13 +800,13 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
return -EINVAL;
- c=ipmr_cache_alloc();
- if (c==NULL)
+ c = ipmr_cache_alloc();
+ if (c == NULL)
return -ENOMEM;
- c->mfc_origin=mfc->mfcc_origin.s_addr;
- c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
- c->mfc_parent=mfc->mfcc_parent;
+ c->mfc_origin = mfc->mfcc_origin.s_addr;
+ c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
+ c->mfc_parent = mfc->mfcc_parent;
ipmr_update_thresholds(c, mfc->mfcc_ttls);
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
@@ -846,7 +859,7 @@ static void mroute_clean_tables(struct sock *sk)
/*
* Wipe the cache
*/
- for (i=0;i<MFC_LINES;i++) {
+ for (i = 0; i < MFC_LINES; i++) {
struct mfc_cache *c, **cp;
cp = &mfc_cache_array[i];
@@ -887,7 +900,7 @@ static void mrtsock_destruct(struct sock *sk)
IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;
write_lock_bh(&mrt_lock);
- mroute_socket=NULL;
+ mroute_socket = NULL;
write_unlock_bh(&mrt_lock);
mroute_clean_tables(sk);
@@ -902,7 +915,7 @@ static void mrtsock_destruct(struct sock *sk)
* MOSPF/PIM router set up we can clean this up.
*/
-int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
+int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
int ret;
struct vifctl vif;
@@ -918,7 +931,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
if (sk->sk_type != SOCK_RAW ||
inet_sk(sk)->num != IPPROTO_IGMP)
return -EOPNOTSUPP;
- if (optlen!=sizeof(int))
+ if (optlen != sizeof(int))
return -ENOPROTOOPT;
rtnl_lock();
@@ -930,7 +943,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
write_lock_bh(&mrt_lock);
- mroute_socket=sk;
+ mroute_socket = sk;
write_unlock_bh(&mrt_lock);
IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
@@ -938,19 +951,19 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
rtnl_unlock();
return ret;
case MRT_DONE:
- if (sk!=mroute_socket)
+ if (sk != mroute_socket)
return -EACCES;
return ip_ra_control(sk, 0, NULL);
case MRT_ADD_VIF:
case MRT_DEL_VIF:
- if (optlen!=sizeof(vif))
+ if (optlen != sizeof(vif))
return -EINVAL;
- if (copy_from_user(&vif,optval,sizeof(vif)))
+ if (copy_from_user(&vif, optval, sizeof(vif)))
return -EFAULT;
if (vif.vifc_vifi >= MAXVIFS)
return -ENFILE;
rtnl_lock();
- if (optname==MRT_ADD_VIF) {
+ if (optname == MRT_ADD_VIF) {
ret = vif_add(&vif, sk==mroute_socket);
} else {
ret = vif_delete(vif.vifc_vifi, 0);
@@ -964,12 +977,12 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
*/
case MRT_ADD_MFC:
case MRT_DEL_MFC:
- if (optlen!=sizeof(mfc))
+ if (optlen != sizeof(mfc))
return -EINVAL;
- if (copy_from_user(&mfc,optval, sizeof(mfc)))
+ if (copy_from_user(&mfc, optval, sizeof(mfc)))
return -EFAULT;
rtnl_lock();
- if (optname==MRT_DEL_MFC)
+ if (optname == MRT_DEL_MFC)
ret = ipmr_mfc_delete(&mfc);
else
ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
@@ -1028,12 +1041,12 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
* Getsock opt support for the multicast routing system.
*/
-int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
+int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
int olr;
int val;
- if (optname!=MRT_VERSION &&
+ if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
optname!=MRT_PIM &&
#endif
@@ -1047,17 +1060,17 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u
if (olr < 0)
return -EINVAL;
- if (put_user(olr,optlen))
+ if (put_user(olr, optlen))
return -EFAULT;
- if (optname==MRT_VERSION)
- val=0x0305;
+ if (optname == MRT_VERSION)
+ val = 0x0305;
#ifdef CONFIG_IP_PIMSM
- else if (optname==MRT_PIM)
- val=mroute_do_pim;
+ else if (optname == MRT_PIM)
+ val = mroute_do_pim;
#endif
else
- val=mroute_do_assert;
- if (copy_to_user(optval,&val,olr))
+ val = mroute_do_assert;
+ if (copy_to_user(optval, &val, olr))
return -EFAULT;
return 0;
}
@@ -1075,27 +1088,27 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
switch (cmd) {
case SIOCGETVIFCNT:
- if (copy_from_user(&vr,arg,sizeof(vr)))
+ if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
- if (vr.vifi>=maxvif)
+ if (vr.vifi >= maxvif)
return -EINVAL;
read_lock(&mrt_lock);
vif=&vif_table[vr.vifi];
if (VIF_EXISTS(vr.vifi)) {
- vr.icount=vif->pkt_in;
- vr.ocount=vif->pkt_out;
- vr.ibytes=vif->bytes_in;
- vr.obytes=vif->bytes_out;
+ vr.icount = vif->pkt_in;
+ vr.ocount = vif->pkt_out;
+ vr.ibytes = vif->bytes_in;
+ vr.obytes = vif->bytes_out;
read_unlock(&mrt_lock);
- if (copy_to_user(arg,&vr,sizeof(vr)))
+ if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
read_unlock(&mrt_lock);
return -EADDRNOTAVAIL;
case SIOCGETSGCNT:
- if (copy_from_user(&sr,arg,sizeof(sr)))
+ if (copy_from_user(&sr, arg, sizeof(sr)))
return -EFAULT;
read_lock(&mrt_lock);
@@ -1106,7 +1119,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
sr.wrong_if = c->mfc_un.res.wrong_if;
read_unlock(&mrt_lock);
- if (copy_to_user(arg,&sr,sizeof(sr)))
+ if (copy_to_user(arg, &sr, sizeof(sr)))
return -EFAULT;
return 0;
}
@@ -1130,15 +1143,15 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
v=&vif_table[0];
- for (ct=0;ct<maxvif;ct++,v++) {
- if (v->dev==dev)
+ for (ct = 0; ct < maxvif; ct++, v++) {
+ if (v->dev == dev)
vif_delete(ct, 1);
}
return NOTIFY_DONE;
}
-static struct notifier_block ip_mr_notifier={
+static struct notifier_block ip_mr_notifier = {
.notifier_call = ipmr_device_event,
};
@@ -1204,7 +1217,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
#ifdef CONFIG_IP_PIMSM
if (vif->flags & VIFF_REGISTER) {
vif->pkt_out++;
- vif->bytes_out+=skb->len;
+ vif->bytes_out += skb->len;
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
@@ -1254,7 +1267,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
}
vif->pkt_out++;
- vif->bytes_out+=skb->len;
+ vif->bytes_out += skb->len;
dst_release(skb->dst);
skb->dst = &rt->u.dst;
@@ -1352,7 +1365,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
}
vif_table[vif].pkt_in++;
- vif_table[vif].bytes_in+=skb->len;
+ vif_table[vif].bytes_in += skb->len;
/*
* Forward the frame
@@ -1364,7 +1377,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
if (skb2)
ipmr_queue_xmit(skb2, cache, psend);
}
- psend=ct;
+ psend = ct;
}
}
if (psend != -1) {
@@ -1428,7 +1441,7 @@ int ip_mr_input(struct sk_buff *skb)
/*
* No usable cache entry
*/
- if (cache==NULL) {
+ if (cache == NULL) {
int vif;
if (local) {
@@ -1469,29 +1482,13 @@ dont_forward:
return 0;
}
-#ifdef CONFIG_IP_PIMSM_V1
-/*
- * Handle IGMP messages of PIMv1
- */
-
-int pim_rcv_v1(struct sk_buff * skb)
+#ifdef CONFIG_IP_PIMSM
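+/*
+ * Common body of the PIMv1/v2 register handlers below; returns
+ * non-zero when the caller should drop the skb.
+ */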
+static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
{
- struct igmphdr *pim;
- struct iphdr *encap;
- struct net_device *reg_dev = NULL;
-
- if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
- goto drop;
+ struct net_device *reg_dev = NULL;
+ struct iphdr *encap;
- pim = igmp_hdr(skb);
-
- if (!mroute_do_pim ||
- skb->len < sizeof(*pim) + sizeof(*encap) ||
- pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
- goto drop;
-
- encap = (struct iphdr *)(skb_transport_header(skb) +
- sizeof(struct igmphdr));
+ encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
/*
Check that:
a. packet is really destined to a multicast group
@@ -1500,8 +1497,8 @@ int pim_rcv_v1(struct sk_buff * skb)
*/
if (!ipv4_is_multicast(encap->daddr) ||
encap->tot_len == 0 ||
- ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
- goto drop;
+ ntohs(encap->tot_len) + pimlen > skb->len)
+ return 1;
read_lock(&mrt_lock);
if (reg_vif_num >= 0)
@@ -1511,7 +1508,7 @@ int pim_rcv_v1(struct sk_buff * skb)
read_unlock(&mrt_lock);
if (reg_dev == NULL)
- goto drop;
+ return 1;
skb->mac_header = skb->network_header;
skb_pull(skb, (u8*)encap - skb->data);
@@ -1527,9 +1524,33 @@ int pim_rcv_v1(struct sk_buff * skb)
nf_reset(skb);
netif_rx(skb);
dev_put(reg_dev);
+
return 0;
- drop:
- kfree_skb(skb);
+}
+#endif
+
+#ifdef CONFIG_IP_PIMSM_V1
+/*
+ * Handle IGMP messages of PIMv1
+ */
+
+int pim_rcv_v1(struct sk_buff *skb)
+{
+ struct igmphdr *pim;
+
+ if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
+ goto drop;
+
+ pim = igmp_hdr(skb);
+
+ if (!mroute_do_pim ||
+ pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
+ goto drop;
+
+ if (__pim_rcv(skb, sizeof(*pim))) {
+drop:
+ kfree_skb(skb);
+ }
return 0;
}
#endif
@@ -1538,10 +1559,8 @@ int pim_rcv_v1(struct sk_buff * skb)
static int pim_rcv(struct sk_buff * skb)
{
struct pimreghdr *pim;
- struct iphdr *encap;
- struct net_device *reg_dev = NULL;
- if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
+ if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
goto drop;
pim = (struct pimreghdr *)skb_transport_header(skb);
@@ -1551,41 +1570,10 @@ static int pim_rcv(struct sk_buff * skb)
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
- /* check if the inner packet is destined to mcast group */
- encap = (struct iphdr *)(skb_transport_header(skb) +
- sizeof(struct pimreghdr));
- if (!ipv4_is_multicast(encap->daddr) ||
- encap->tot_len == 0 ||
- ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
- goto drop;
-
- read_lock(&mrt_lock);
- if (reg_vif_num >= 0)
- reg_dev = vif_table[reg_vif_num].dev;
- if (reg_dev)
- dev_hold(reg_dev);
- read_unlock(&mrt_lock);
-
- if (reg_dev == NULL)
- goto drop;
-
- skb->mac_header = skb->network_header;
- skb_pull(skb, (u8*)encap - skb->data);
- skb_reset_network_header(skb);
- skb->dev = reg_dev;
- skb->protocol = htons(ETH_P_IP);
- skb->ip_summed = 0;
- skb->pkt_type = PACKET_HOST;
- dst_release(skb->dst);
- reg_dev->stats.rx_bytes += skb->len;
- reg_dev->stats.rx_packets++;
- skb->dst = NULL;
- nf_reset(skb);
- netif_rx(skb);
- dev_put(reg_dev);
- return 0;
- drop:
- kfree_skb(skb);
+ if (__pim_rcv(skb, sizeof(*pim))) {
+drop:
+ kfree_skb(skb);
+ }
return 0;
}
#endif
@@ -1602,13 +1590,13 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
if (dev)
RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
- mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0));
+ mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
if (c->mfc_un.res.ttls[ct] < 255) {
if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
goto rtattr_failure;
- nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
+ nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
@@ -1634,7 +1622,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
read_lock(&mrt_lock);
cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
- if (cache==NULL) {
+ if (cache == NULL) {
struct sk_buff *skb2;
struct iphdr *iph;
struct net_device *dev;
@@ -1866,15 +1854,16 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
const struct mfc_cache *mfc = v;
const struct ipmr_mfc_iter *it = seq->private;
- seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
+ seq_printf(seq, "%08lX %08lX %-3hd",
(unsigned long) mfc->mfc_mcastgrp,
(unsigned long) mfc->mfc_origin,
- mfc->mfc_parent,
- mfc->mfc_un.res.pkt,
- mfc->mfc_un.res.bytes,
- mfc->mfc_un.res.wrong_if);
+ mfc->mfc_parent);
if (it->cache != &mfc_unres_queue) {
+ seq_printf(seq, " %8lu %8lu %8lu",
+ mfc->mfc_un.res.pkt,
+ mfc->mfc_un.res.bytes,
+ mfc->mfc_un.res.wrong_if);
for (n = mfc->mfc_un.res.minvif;
n < mfc->mfc_un.res.maxvif; n++ ) {
if (VIF_EXISTS(n)
@@ -1883,6 +1872,11 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
" %2d:%-3d",
n, mfc->mfc_un.res.ttls[n]);
}
+ } else {
+ /* unresolved mfc_caches don't contain
+ * pkt, bytes and wrong_if values
+ */
+ seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
}
seq_putc(seq, '\n');
}
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 6efdb70b3eb..fdf6811c31a 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -66,7 +66,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
#ifdef CONFIG_XFRM
if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
xfrm_decode_session(skb, &fl, AF_INET) == 0)
- if (xfrm_lookup(&skb->dst, &fl, skb->sk, 0))
+ if (xfrm_lookup(net, &skb->dst, &fl, skb->sk, 0))
return -1;
#endif
@@ -97,7 +97,7 @@ int ip_xfrm_me_harder(struct sk_buff *skb)
dst = ((struct xfrm_dst *)dst)->route;
dst_hold(dst);
- if (xfrm_lookup(&dst, &fl, skb->sk, 0) < 0)
+ if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0)
return -1;
dst_release(skb->dst);
@@ -125,6 +125,7 @@ struct ip_rt_info {
__be32 daddr;
__be32 saddr;
u_int8_t tos;
+ u_int32_t mark;
};
static void nf_ip_saveroute(const struct sk_buff *skb,
@@ -138,6 +139,7 @@ static void nf_ip_saveroute(const struct sk_buff *skb,
rt_info->tos = iph->tos;
rt_info->daddr = iph->daddr;
rt_info->saddr = iph->saddr;
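+ /* record the mark too: a changed mark can change the route */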
+ rt_info->mark = skb->mark;
}
}
@@ -150,6 +152,7 @@ static int nf_ip_reroute(struct sk_buff *skb,
const struct iphdr *iph = ip_hdr(skb);
if (!(iph->tos == rt_info->tos
+ && skb->mark == rt_info->mark
&& iph->daddr == rt_info->daddr
&& iph->saddr == rt_info->saddr))
return ip_route_me_harder(skb, RTN_UNSPEC);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 8d70d29f1cc..7ea88b61cb0 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -142,15 +142,15 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
ARPT_INV_TGTIP)) {
dprintf("Source or target IP address mismatch.\n");
- dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
- NIPQUAD(src_ipaddr),
- NIPQUAD(arpinfo->smsk.s_addr),
- NIPQUAD(arpinfo->src.s_addr),
+ dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
+ &src_ipaddr,
+ &arpinfo->smsk.s_addr,
+ &arpinfo->src.s_addr,
arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
- dprintf("TGT: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
- NIPQUAD(tgt_ipaddr),
- NIPQUAD(arpinfo->tmsk.s_addr),
- NIPQUAD(arpinfo->tgt.s_addr),
+ dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
+ &tgt_ipaddr,
+ &arpinfo->tmsk.s_addr,
+ &arpinfo->tgt.s_addr,
arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
return 0;
}
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index bee3d117661..e091187e864 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -75,16 +75,6 @@ static unsigned int arpt_out_hook(unsigned int hook,
dev_net(out)->ipv4.arptable_filter);
}
-static unsigned int arpt_forward_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return arpt_do_table(skb, hook, in, out,
- dev_net(in)->ipv4.arptable_filter);
-}
-
static struct nf_hook_ops arpt_ops[] __read_mostly = {
{
.hook = arpt_in_hook,
@@ -101,7 +91,7 @@ static struct nf_hook_ops arpt_ops[] __read_mostly = {
.priority = NF_IP_PRI_FILTER,
},
{
- .hook = arpt_forward_hook,
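+ /* arpt_forward_hook was identical to arpt_in_hook (both resolve
+  * the table via dev_net(in)), so the latter is reused here
+  */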
+ .hook = arpt_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_ARP,
.hooknum = NF_ARP_FORWARD,
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 213fb27debc..ef8b6ca068b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -94,15 +94,11 @@ ip_packet_match(const struct iphdr *ip,
IPT_INV_DSTIP)) {
dprintf("Source or dest mismatch.\n");
- dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
- NIPQUAD(ip->saddr),
- NIPQUAD(ipinfo->smsk.s_addr),
- NIPQUAD(ipinfo->src.s_addr),
+ dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
+ &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
- dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
- NIPQUAD(ip->daddr),
- NIPQUAD(ipinfo->dmsk.s_addr),
- NIPQUAD(ipinfo->dst.s_addr),
+ dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
+ &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
return false;
}
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 7ac1677419a..2e4f98b8552 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -168,7 +168,7 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
char buffer[16];
/* create proc dir entry */
- sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip));
+ sprintf(buffer, "%pI4", &ip);
c->pde = proc_create_data(buffer, S_IWUSR|S_IRUSR,
clusterip_procdir,
&clusterip_proc_fops, c);
@@ -373,7 +373,7 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par)
config = clusterip_config_find_get(e->ip.dst.s_addr, 1);
if (!config) {
if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) {
- printk(KERN_WARNING "CLUSTERIP: no config found for %u.%u.%u.%u, need 'new'\n", NIPQUAD(e->ip.dst.s_addr));
+ printk(KERN_WARNING "CLUSTERIP: no config found for %pI4, need 'new'\n", &e->ip.dst.s_addr);
return false;
} else {
struct net_device *dev;
@@ -478,9 +478,8 @@ static void arp_print(struct arp_payload *payload)
}
hbuffer[--k]='\0';
- printk("src %u.%u.%u.%u@%s, dst %u.%u.%u.%u\n",
- NIPQUAD(payload->src_ip), hbuffer,
- NIPQUAD(payload->dst_ip));
+ printk("src %pI4@%s, dst %pI4\n",
+ &payload->src_ip, hbuffer, &payload->dst_ip);
}
#endif
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 7b5dbe118c0..27a78fbbd92 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -54,8 +54,8 @@ static void dump_packet(const struct nf_loginfo *info,
/* Important fields:
* TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
/* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
- printk("SRC=%u.%u.%u.%u DST=%u.%u.%u.%u ",
- NIPQUAD(ih->saddr), NIPQUAD(ih->daddr));
+ printk("SRC=%pI4 DST=%pI4 ",
+ &ih->saddr, &ih->daddr);
/* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
printk("LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
@@ -262,8 +262,7 @@ static void dump_packet(const struct nf_loginfo *info,
break;
case ICMP_REDIRECT:
/* Max length: 24 "GATEWAY=255.255.255.255 " */
- printk("GATEWAY=%u.%u.%u.%u ",
- NIPQUAD(ich->un.gateway));
+ printk("GATEWAY=%pI4 ", &ich->un.gateway);
/* Fall through */
case ICMP_DEST_UNREACH:
case ICMP_SOURCE_QUENCH:
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 88762f02779..3b216be3bc9 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -23,24 +23,25 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("Xtables: address type match for IPv4");
-static inline bool match_type(const struct net_device *dev, __be32 addr,
- u_int16_t mask)
+static inline bool match_type(struct net *net, const struct net_device *dev,
+ __be32 addr, u_int16_t mask)
{
- return !!(mask & (1 << inet_dev_addr_type(&init_net, dev, addr)));
+ return !!(mask & (1 << inet_dev_addr_type(net, dev, addr)));
}
static bool
addrtype_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
{
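+ /* take the namespace from whichever device is set instead of
+  * assuming init_net
+  */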
+ struct net *net = dev_net(par->in ? par->in : par->out);
const struct ipt_addrtype_info *info = par->matchinfo;
const struct iphdr *iph = ip_hdr(skb);
bool ret = true;
if (info->source)
- ret &= match_type(NULL, iph->saddr, info->source) ^
+ ret &= match_type(net, NULL, iph->saddr, info->source) ^
info->invert_source;
if (info->dest)
- ret &= match_type(NULL, iph->daddr, info->dest) ^
+ ret &= match_type(net, NULL, iph->daddr, info->dest) ^
info->invert_dest;
return ret;
@@ -49,6 +50,7 @@ addrtype_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
static bool
addrtype_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
{
+ struct net *net = dev_net(par->in ? par->in : par->out);
const struct ipt_addrtype_info_v1 *info = par->matchinfo;
const struct iphdr *iph = ip_hdr(skb);
const struct net_device *dev = NULL;
@@ -60,10 +62,10 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
dev = par->out;
if (info->source)
- ret &= match_type(dev, iph->saddr, info->source) ^
+ ret &= match_type(net, dev, iph->saddr, info->source) ^
(info->flags & IPT_ADDRTYPE_INVERT_SOURCE);
if (ret && info->dest)
- ret &= match_type(dev, iph->daddr, info->dest) ^
+ ret &= match_type(net, dev, iph->daddr, info->dest) ^
!!(info->flags & IPT_ADDRTYPE_INVERT_DEST);
return ret;
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 4a7c3527539..b2141e11575 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -60,9 +60,8 @@ static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
static int ipv4_print_tuple(struct seq_file *s,
const struct nf_conntrack_tuple *tuple)
{
- return seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ",
- NIPQUAD(tuple->src.u3.ip),
- NIPQUAD(tuple->dst.u3.ip));
+ return seq_printf(s, "src=%pI4 dst=%pI4 ",
+ &tuple->src.u3.ip, &tuple->dst.u3.ip);
}
static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
@@ -198,7 +197,7 @@ static ctl_table ip_ct_sysctl_table[] = {
.data = &nf_conntrack_max,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_NF_CONNTRACK_COUNT,
@@ -206,7 +205,7 @@ static ctl_table ip_ct_sysctl_table[] = {
.data = &init_net.ct.count,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS,
@@ -214,7 +213,7 @@ static ctl_table ip_ct_sysctl_table[] = {
.data = &nf_conntrack_htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_NF_CONNTRACK_CHECKSUM,
@@ -222,7 +221,7 @@ static ctl_table ip_ct_sysctl_table[] = {
.data = &init_net.ct.sysctl_checksum,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_NF_CONNTRACK_LOG_INVALID,
@@ -230,8 +229,8 @@ static ctl_table ip_ct_sysctl_table[] = {
.data = &init_net.ct.sysctl_log_invalid,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
+ .strategy = sysctl_intvec,
.extra1 = &log_invalid_proto_min,
.extra2 = &log_invalid_proto_max,
},
@@ -284,17 +283,17 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
.tuple.dst.u3.ip;
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
- pr_debug("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
- NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+ pr_debug("SO_ORIGINAL_DST: %pI4 %u\n",
+ &sin.sin_addr.s_addr, ntohs(sin.sin_port));
nf_ct_put(ct);
if (copy_to_user(user, &sin, sizeof(sin)) != 0)
return -EFAULT;
else
return 0;
}
- pr_debug("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n",
- NIPQUAD(tuple.src.u3.ip), ntohs(tuple.src.u.tcp.port),
- NIPQUAD(tuple.dst.u3.ip), ntohs(tuple.dst.u.tcp.port));
+ pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n",
+ &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port),
+ &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port));
return -ENOENT;
}
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 4e887922022..1fd3ef7718b 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -272,7 +272,7 @@ static struct ctl_table icmp_sysctl_table[] = {
.data = &nf_ct_icmp_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
},
{
.ctl_name = 0
@@ -285,7 +285,7 @@ static struct ctl_table icmp_compat_sysctl_table[] = {
.data = &nf_ct_icmp_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
},
{
.ctl_name = 0
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index ee47bf28c82..7e8e6fc7541 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -119,10 +119,9 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
(ntohl(addr.ip) & 0xff000000) == 0x7f000000)
i = 0;
- pr_debug("nf_nat_ras: set signal address "
- "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
- NIPQUAD(addr.ip), port,
- NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip),
+ pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n",
+ &addr.ip, port,
+ &ct->tuplehash[!dir].tuple.dst.u3.ip,
info->sig_port[!dir]);
return set_h225_addr(skb, data, 0, &taddr[i],
&ct->tuplehash[!dir].
@@ -131,10 +130,9 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
} else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
port == info->sig_port[dir]) {
/* GK->GW */
- pr_debug("nf_nat_ras: set signal address "
- "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
- NIPQUAD(addr.ip), port,
- NIPQUAD(ct->tuplehash[!dir].tuple.src.u3.ip),
+ pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n",
+ &addr.ip, port,
+ &ct->tuplehash[!dir].tuple.src.u3.ip,
info->sig_port[!dir]);
return set_h225_addr(skb, data, 0, &taddr[i],
&ct->tuplehash[!dir].
@@ -162,10 +160,9 @@ static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) &&
addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
port == ct->tuplehash[dir].tuple.src.u.udp.port) {
- pr_debug("nf_nat_ras: set rasAddress "
- "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
- NIPQUAD(addr.ip), ntohs(port),
- NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip),
+ pr_debug("nf_nat_ras: set rasAddress %pI4:%hu->%pI4:%hu\n",
+ &addr.ip, ntohs(port),
+ &ct->tuplehash[!dir].tuple.dst.u3.ip,
ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port));
return set_h225_addr(skb, data, 0, &taddr[i],
&ct->tuplehash[!dir].tuple.dst.u3,
@@ -257,15 +254,15 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
}
/* Success */
- pr_debug("nf_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
- NIPQUAD(rtp_exp->tuple.src.u3.ip),
+ pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n",
+ &rtp_exp->tuple.src.u3.ip,
ntohs(rtp_exp->tuple.src.u.udp.port),
- NIPQUAD(rtp_exp->tuple.dst.u3.ip),
+ &rtp_exp->tuple.dst.u3.ip,
ntohs(rtp_exp->tuple.dst.u.udp.port));
- pr_debug("nf_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
- NIPQUAD(rtcp_exp->tuple.src.u3.ip),
+ pr_debug("nf_nat_h323: expect RTCP %pI4:%hu->%pI4:%hu\n",
+ &rtcp_exp->tuple.src.u3.ip,
ntohs(rtcp_exp->tuple.src.u.udp.port),
- NIPQUAD(rtcp_exp->tuple.dst.u3.ip),
+ &rtcp_exp->tuple.dst.u3.ip,
ntohs(rtcp_exp->tuple.dst.u.udp.port));
return 0;
@@ -307,10 +304,10 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
return -1;
}
- pr_debug("nf_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
- NIPQUAD(exp->tuple.src.u3.ip),
+ pr_debug("nf_nat_h323: expect T.120 %pI4:%hu->%pI4:%hu\n",
+ &exp->tuple.src.u3.ip,
ntohs(exp->tuple.src.u.tcp.port),
- NIPQUAD(exp->tuple.dst.u3.ip),
+ &exp->tuple.dst.u3.ip,
ntohs(exp->tuple.dst.u.tcp.port));
return 0;
@@ -361,10 +358,10 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
return -1;
}
- pr_debug("nf_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
- NIPQUAD(exp->tuple.src.u3.ip),
+ pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n",
+ &exp->tuple.src.u3.ip,
ntohs(exp->tuple.src.u.tcp.port),
- NIPQUAD(exp->tuple.dst.u3.ip),
+ &exp->tuple.dst.u3.ip,
ntohs(exp->tuple.dst.u.tcp.port));
return 0;
@@ -455,10 +452,10 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
}
/* Success */
- pr_debug("nf_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
- NIPQUAD(exp->tuple.src.u3.ip),
+ pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n",
+ &exp->tuple.src.u3.ip,
ntohs(exp->tuple.src.u.tcp.port),
- NIPQUAD(exp->tuple.dst.u3.ip),
+ &exp->tuple.dst.u3.ip,
ntohs(exp->tuple.dst.u.tcp.port));
return 0;
@@ -524,11 +521,10 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
}
/* Success */
- pr_debug("nf_nat_q931: expect Call Forwarding "
- "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
- NIPQUAD(exp->tuple.src.u3.ip),
+ pr_debug("nf_nat_q931: expect Call Forwarding %pI4:%hu->%pI4:%hu\n",
+ &exp->tuple.src.u3.ip,
ntohs(exp->tuple.src.u.tcp.port),
- NIPQUAD(exp->tuple.dst.u3.ip),
+ &exp->tuple.dst.u3.ip,
ntohs(exp->tuple.dst.u.tcp.port));
return 0;
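
All of the NIPQUAD conversions in this and the following files switch from passing four unpacked byte values to the %pI4 printk extension, which takes a single pointer to the big-endian address in place. A userspace sketch of the formatting %pI4 performs, assuming a network-order 32-bit value:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* roughly what the kernel emits for %pI4 on a network-order address */
    static int format_ipv4(char *buf, size_t len, const uint32_t *addr)
    {
            const unsigned char *p = (const unsigned char *)addr;

            return snprintf(buf, len, "%u.%u.%u.%u", p[0], p[1], p[2], p[3]);
    }

    int main(void)
    {
            uint32_t ip = htonl(0xc0a80001);        /* 192.168.0.1 */
            char buf[sizeof("255.255.255.255")];

            format_ipv4(buf, sizeof(buf), &ip);
            puts(buf);                              /* prints 192.168.0.1 */
            return 0;
    }

Passing &addr instead of NIPQUAD(addr) also shrinks the argument list from four ints to one pointer, which is why each converted pr_debug() loses a line or two.
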
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
index fe6f9cef6c8..ea83a886b03 100644
--- a/net/ipv4/netfilter/nf_nat_irc.c
+++ b/net/ipv4/netfilter/nf_nat_irc.c
@@ -55,8 +55,8 @@ static unsigned int help(struct sk_buff *skb,
ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip);
sprintf(buffer, "%u %u", ip, port);
- pr_debug("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n",
- buffer, NIPQUAD(ip), port);
+ pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
+ buffer, &ip, port);
ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
matchoff, matchlen, buffer,
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 8d489e746b2..a7eb0471904 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -86,25 +86,6 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_target_param *par)
return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
}
-/* Before 2.6.11 we did implicit source NAT if required. Warn about change. */
-static void warn_if_extra_mangle(struct net *net, __be32 dstip, __be32 srcip)
-{
- static int warned = 0;
- struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dstip } } };
- struct rtable *rt;
-
- if (ip_route_output_key(net, &rt, &fl) != 0)
- return;
-
- if (rt->rt_src != srcip && !warned) {
- printk("NAT: no longer support implicit source local NAT\n");
- printk("NAT: packet src %u.%u.%u.%u -> dst %u.%u.%u.%u\n",
- NIPQUAD(srcip), NIPQUAD(dstip));
- warned = 1;
- }
- ip_rt_put(rt);
-}
-
static unsigned int
ipt_dnat_target(struct sk_buff *skb, const struct xt_target_param *par)
{
@@ -120,11 +101,6 @@ ipt_dnat_target(struct sk_buff *skb, const struct xt_target_param *par)
/* Connection must be valid and new. */
NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
- if (par->hooknum == NF_INET_LOCAL_OUT &&
- mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)
- warn_if_extra_mangle(dev_net(par->out), ip_hdr(skb)->daddr,
- mr->range[0].min_ip);
-
return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST);
}
@@ -166,8 +142,7 @@ alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
struct nf_nat_range range
= { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } };
- pr_debug("Allocating NULL binding for %p (%u.%u.%u.%u)\n",
- ct, NIPQUAD(ip));
+ pr_debug("Allocating NULL binding for %p (%pI4)\n", ct, &ip);
return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
}
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 14544320c54..07d61a57613 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -74,8 +74,7 @@ static int map_addr(struct sk_buff *skb,
if (newaddr == addr->ip && newport == port)
return 1;
- buflen = sprintf(buffer, "%u.%u.%u.%u:%u",
- NIPQUAD(newaddr), ntohs(newport));
+ buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport));
return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
buffer, buflen);
@@ -152,8 +151,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
&addr) > 0 &&
addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
- __be32 ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
- buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip));
+ buflen = sprintf(buffer, "%pI4",
+ &ct->tuplehash[!dir].tuple.dst.u3.ip);
if (!mangle_packet(skb, dptr, datalen, poff, plen,
buffer, buflen))
return NF_DROP;
@@ -166,8 +165,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
&addr) > 0 &&
addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
- __be32 ip = ct->tuplehash[!dir].tuple.src.u3.ip;
- buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip));
+ buflen = sprintf(buffer, "%pI4",
+ &ct->tuplehash[!dir].tuple.src.u3.ip);
if (!mangle_packet(skb, dptr, datalen, poff, plen,
buffer, buflen))
return NF_DROP;
@@ -279,8 +278,7 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb,
if (exp->tuple.dst.u3.ip != exp->saved_ip ||
exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
- buflen = sprintf(buffer, "%u.%u.%u.%u:%u",
- NIPQUAD(newip), port);
+ buflen = sprintf(buffer, "%pI4:%u", &newip, port);
if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen,
buffer, buflen))
goto err;
@@ -345,7 +343,7 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
char buffer[sizeof("nnn.nnn.nnn.nnn")];
unsigned int buflen;
- buflen = sprintf(buffer, NIPQUAD_FMT, NIPQUAD(addr->ip));
+ buflen = sprintf(buffer, "%pI4", &addr->ip);
if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term,
buffer, buflen))
return 0;
@@ -380,7 +378,7 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
unsigned int buflen;
/* Mangle session description owner and contact addresses */
- buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(addr->ip));
+ buflen = sprintf(buffer, "%pI4", &addr->ip);
if (mangle_sdp_packet(skb, dptr, dataoff, datalen,
SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
buffer, buflen))
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 8303e4b406c..182f845de92 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -930,8 +930,8 @@ static inline void mangle_address(unsigned char *begin,
}
if (debug)
- printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
- "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
+ printk(KERN_DEBUG "bsalg: mapped %pI4 to %pI4\n",
+ &old, addr);
}
}
@@ -1267,9 +1267,8 @@ static int help(struct sk_buff *skb, unsigned int protoff,
*/
if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
if (net_ratelimit())
- printk(KERN_WARNING "SNMP: dropping malformed packet "
- "src=%u.%u.%u.%u dst=%u.%u.%u.%u\n",
- NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
+ printk(KERN_WARNING "SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
+ &iph->saddr, &iph->daddr);
return NF_DROP;
}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index a631a1f110c..614958b7c27 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -54,8 +54,9 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
socket_seq_show(seq);
seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
sock_prot_inuse_get(net, &tcp_prot),
- atomic_read(&tcp_orphan_count),
- tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated),
+ (int)percpu_counter_sum_positive(&tcp_orphan_count),
+ tcp_death_row.tw_count,
+ (int)percpu_counter_sum_positive(&tcp_sockets_allocated),
atomic_read(&tcp_memory_allocated));
seq_printf(seq, "UDP: inuse %d mem %d\n",
sock_prot_inuse_get(net, &udp_prot),
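
This hunk pairs with the tcp.c change below that turns tcp_orphan_count and tcp_sockets_allocated into struct percpu_counter: writers update a cheap per-CPU delta and fold it into a shared total only once it crosses a batch threshold, while readers that need an accurate value call percpu_counter_sum_positive() and walk every CPU — hence the (int) casts here. A simplified single-threaded sketch of the batching idea (hypothetical names, ignoring the real per-CPU allocation and locking):

    #include <stdatomic.h>

    #define NCPU  4
    #define BATCH 32

    struct pcpu_counter {
            atomic_long total;      /* shared; touched only on fold */
            long local[NCPU];       /* per-CPU deltas; cheap to update */
    };

    static void pcpu_add(struct pcpu_counter *c, int cpu, long n)
    {
            c->local[cpu] += n;
            if (c->local[cpu] >= BATCH || c->local[cpu] <= -BATCH) {
                    atomic_fetch_add(&c->total, c->local[cpu]);
                    c->local[cpu] = 0;      /* fold delta into total */
            }
    }

    /* slow but accurate read, like percpu_counter_sum_positive() */
    static long pcpu_sum_positive(struct pcpu_counter *c)
    {
            long sum = atomic_load(&c->total);
            int cpu;

            for (cpu = 0; cpu < NCPU; cpu++)
                    sum += c->local[cpu];
            return sum > 0 ? sum : 0;
    }
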
@@ -234,6 +235,9 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPSpuriousRTOs", LINUX_MIB_TCPSPURIOUSRTOS),
SNMP_MIB_ITEM("TCPMD5NotFound", LINUX_MIB_TCPMD5NOTFOUND),
SNMP_MIB_ITEM("TCPMD5Unexpected", LINUX_MIB_TCPMD5UNEXPECTED),
+ SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
+ SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
+ SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index cd975743bcd..dff8bc4e0fa 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -247,7 +247,7 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
}
if (inet->recverr) {
- struct iphdr *iph = (struct iphdr*)skb->data;
+ struct iphdr *iph = (struct iphdr *)skb->data;
u8 *payload = skb->data + (iph->ihl << 2);
if (inet->hdrincl)
@@ -465,7 +465,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
*/
if (msg->msg_namelen) {
- struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name;
+ struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
err = -EINVAL;
if (msg->msg_namelen < sizeof(*usin))
goto out;
@@ -572,7 +572,7 @@ back_from_confirm:
ipc.addr = rt->rt_dst;
lock_sock(sk);
err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
- &ipc, rt, msg->msg_flags);
+ &ipc, &rt, msg->msg_flags);
if (err)
ip_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
@@ -851,7 +851,7 @@ struct proto raw_prot = {
static struct sock *raw_get_first(struct seq_file *seq)
{
struct sock *sk;
- struct raw_iter_state* state = raw_seq_private(seq);
+ struct raw_iter_state *state = raw_seq_private(seq);
for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
++state->bucket) {
@@ -868,7 +868,7 @@ found:
static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
{
- struct raw_iter_state* state = raw_seq_private(seq);
+ struct raw_iter_state *state = raw_seq_private(seq);
do {
sk = sk_next(sk);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2ea6dcc3e2c..77bfba97595 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
+static int rt_chain_length_max __read_mostly = 20;
static void rt_worker_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
@@ -145,6 +146,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);
+static void rt_emergency_hash_rebuild(struct net *net);
static struct dst_ops ipv4_dst_ops = {
@@ -158,7 +160,6 @@ static struct dst_ops ipv4_dst_ops = {
.link_failure = ipv4_link_failure,
.update_pmtu = ip_rt_update_pmtu,
.local_out = __ip_local_out,
- .entry_size = sizeof(struct rtable),
.entries = ATOMIC_INIT(0),
};
@@ -201,6 +202,7 @@ const __u8 ip_tos2prio[16] = {
struct rt_hash_bucket {
struct rtable *chain;
};
+
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
defined(CONFIG_PROVE_LOCKING)
/*
@@ -674,6 +676,20 @@ static inline u32 rt_score(struct rtable *rt)
return score;
}
+static inline bool rt_caching(const struct net *net)
+{
+ return net->ipv4.current_rt_cache_rebuild_count <=
+ net->ipv4.sysctl_rt_cache_rebuild_count;
+}
+
+static inline bool compare_hash_inputs(const struct flowi *fl1,
+ const struct flowi *fl2)
+{
+ return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
+ (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
+ (fl1->iif ^ fl2->iif)) == 0);
+}
+
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
@@ -753,11 +769,24 @@ static void rt_do_flush(int process_context)
}
}
+/*
+ * While freeing expired entries, we compute average chain length
+ * and standard deviation, using fixed-point arithmetic.
+ * This gives an estimate of rt_chain_length_max:
+ *   rt_chain_length_max = max(elasticity, AVG + 4*SD)
+ * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
+ */
+
+#define FRACT_BITS 3
+#define ONE (1UL << FRACT_BITS)
+
static void rt_check_expire(void)
{
static unsigned int rover;
unsigned int i = rover, goal;
struct rtable *rth, **rthp;
+ unsigned long length = 0, samples = 0;
+ unsigned long sum = 0, sum2 = 0;
u64 mult;
mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
@@ -766,6 +795,7 @@ static void rt_check_expire(void)
goal = (unsigned int)mult;
if (goal > rt_hash_mask)
goal = rt_hash_mask + 1;
+ length = 0;
for (; goal > 0; goal--) {
unsigned long tmo = ip_rt_gc_timeout;
@@ -775,6 +805,8 @@ static void rt_check_expire(void)
if (need_resched())
cond_resched();
+ samples++;
+
if (*rthp == NULL)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
@@ -789,11 +821,29 @@ static void rt_check_expire(void)
if (time_before_eq(jiffies, rth->u.dst.expires)) {
tmo >>= 1;
rthp = &rth->u.dst.rt_next;
+				/*
+				 * Only bump our length if the hash
+				 * inputs on entries n and n+1 differ;
+				 * we count entries on a chain with
+				 * equal hash inputs only once, so that
+				 * entries for different QoS levels
+				 * and other non-hash-input attributes
+				 * don't unfairly skew the length
+				 * computation.
+				 */
+ if ((*rthp == NULL) ||
+ !compare_hash_inputs(&(*rthp)->fl,
+ &rth->fl))
+ length += ONE;
continue;
}
} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
tmo >>= 1;
rthp = &rth->u.dst.rt_next;
+ if ((*rthp == NULL) ||
+ !compare_hash_inputs(&(*rthp)->fl,
+ &rth->fl))
+ length += ONE;
continue;
}
@@ -802,6 +852,15 @@ static void rt_check_expire(void)
rt_free(rth);
}
spin_unlock_bh(rt_hash_lock_addr(i));
+ sum += length;
+ sum2 += length*length;
+ }
+ if (samples) {
+ unsigned long avg = sum / samples;
+ unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+ rt_chain_length_max = max_t(unsigned long,
+ ip_rt_gc_elasticity,
+ (avg + 4*sd) >> FRACT_BITS);
}
rover = i;
}
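
The statistics gathered above are kept in fixed point: each counted entry contributes ONE = 1 << FRACT_BITS, so avg and sd carry three fractional bits until the final >> FRACT_BITS, and the cap becomes max(elasticity, mean + 4 standard deviations). A standalone sketch of the same arithmetic over an array of integer chain lengths (sqrt() stands in for the kernel's int_sqrt()):

    #include <math.h>           /* sqrt() stands in for int_sqrt() */

    #define FRACT_BITS 3
    #define ONE (1UL << FRACT_BITS)

    /* max(elasticity, avg + 4*sd) over sampled chain lengths */
    static unsigned long chain_length_cap(const unsigned long *len,
                                          unsigned long samples,
                                          unsigned long elasticity)
    {
            unsigned long sum = 0, sum2 = 0, i;
            unsigned long avg, sd, cap;

            for (i = 0; i < samples; i++) {
                    unsigned long l = len[i] * ONE; /* to fixed point */

                    sum  += l;
                    sum2 += l * l;
            }
            avg = sum / samples;
            sd  = (unsigned long)sqrt((double)(sum2 / samples - avg * avg));
            cap = (avg + 4 * sd) >> FRACT_BITS;

            return cap > elasticity ? cap : elasticity;
    }
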
@@ -851,6 +910,26 @@ static void rt_secret_rebuild(unsigned long __net)
mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
}
+static void rt_secret_rebuild_oneshot(struct net *net)
+{
+ del_timer_sync(&net->ipv4.rt_secret_timer);
+ rt_cache_invalidate(net);
+ if (ip_rt_secret_interval) {
+ net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval;
+ add_timer(&net->ipv4.rt_secret_timer);
+ }
+}
+
+static void rt_emergency_hash_rebuild(struct net *net)
+{
+ if (net_ratelimit()) {
+ printk(KERN_WARNING "Route hash chain too long!\n");
+ printk(KERN_WARNING "Adjust your secret_interval!\n");
+ }
+
+ rt_secret_rebuild_oneshot(net);
+}
+
/*
Short description of GC goals.
@@ -989,6 +1068,7 @@ out: return 0;
static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
{
struct rtable *rth, **rthp;
+ struct rtable *rthi;
unsigned long now;
struct rtable *cand, **candp;
u32 min_score;
@@ -1002,7 +1082,13 @@ restart:
candp = NULL;
now = jiffies;
+ if (!rt_caching(dev_net(rt->u.dst.dev))) {
+ rt_drop(rt);
+ return 0;
+ }
+
rthp = &rt_hash_table[hash].chain;
+ rthi = NULL;
spin_lock_bh(rt_hash_lock_addr(hash));
while ((rth = *rthp) != NULL) {
@@ -1048,6 +1134,17 @@ restart:
chain_length++;
rthp = &rth->u.dst.rt_next;
+
+	/*
+	 * Check to see if the next entry in the chain
+	 * contains the same hash input values as rt. If it
+	 * does, this is where we will insert into the list,
+	 * instead of at the head. This groups entries that
+	 * differ by aspects not relevant to the hash function
+	 * together, which we use to adjust our chain length.
+	 */
+ if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
+ rthi = rth;
}
if (cand) {
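
With rthi recorded during the walk above, rt_intern_hash() no longer always inserts at the bucket head: a route whose hash inputs match an existing group is spliced in right after that group, keeping same-flow entries contiguous so the length accounting in rt_check_expire() counts each flow once. A generic sketch of insert-after-last-match on a singly linked list (hypothetical node type, not the rtable layout):

    struct node {
            unsigned long key;      /* the "hash input" */
            struct node *next;
    };

    /*
     * Insert n after the last node with the same key, else at the
     * head, keeping equal-key nodes contiguous in the chain.
     */
    static void insert_grouped(struct node **head, struct node *n)
    {
            struct node *last_match = NULL;
            struct node *p;

            for (p = *head; p; p = p->next)
                    if (p->key == n->key)
                            last_match = p;

            if (last_match) {
                    n->next = last_match->next;
                    last_match->next = n;
            } else {
                    n->next = *head;
                    *head = n;
            }
    }
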
@@ -1061,6 +1158,16 @@ restart:
*candp = cand->u.dst.rt_next;
rt_free(cand);
}
+ } else {
+ if (chain_length > rt_chain_length_max) {
+ struct net *net = dev_net(rt->u.dst.dev);
+ int num = ++net->ipv4.current_rt_cache_rebuild_count;
+ if (!rt_caching(dev_net(rt->u.dst.dev))) {
+				printk(KERN_WARNING "%s: %d rebuilds is over the limit, route caching disabled\n",
+ rt->u.dst.dev->name, num);
+ }
+ rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev));
+ }
}
/* Try to bind route to arp only if it is output
@@ -1098,14 +1205,17 @@ restart:
}
}
- rt->u.dst.rt_next = rt_hash_table[hash].chain;
+ if (rthi)
+ rt->u.dst.rt_next = rthi->u.dst.rt_next;
+ else
+ rt->u.dst.rt_next = rt_hash_table[hash].chain;
+
#if RT_CACHE_DEBUG >= 2
if (rt->u.dst.rt_next) {
struct rtable *trt;
- printk(KERN_DEBUG "rt_cache @%02x: " NIPQUAD_FMT, hash,
- NIPQUAD(rt->rt_dst));
+ printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst);
for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
- printk(" . " NIPQUAD_FMT, NIPQUAD(trt->rt_dst));
+ printk(" . %pI4", &trt->rt_dst);
printk("\n");
}
#endif
@@ -1114,7 +1224,11 @@ restart:
	 * previous writes to rt are committed to memory
* before making rt visible to other CPUS.
*/
- rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+ if (rthi)
+ rcu_assign_pointer(rthi->u.dst.rt_next, rt);
+ else
+ rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+
spin_unlock_bh(rt_hash_lock_addr(hash));
*rp = rt;
return 0;
@@ -1217,6 +1331,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
|| ipv4_is_zeronet(new_gw))
goto reject_redirect;
+ if (!rt_caching(net))
+ goto reject_redirect;
+
if (!IN_DEV_SHARED_MEDIA(in_dev)) {
if (!inet_addr_onlink(in_dev, new_gw, old_gw))
goto reject_redirect;
@@ -1267,7 +1384,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
/* Copy all the information. */
*rt = *rth;
- INIT_RCU_HEAD(&rt->u.dst.rcu_head);
rt->u.dst.__use = 1;
atomic_set(&rt->u.dst.__refcnt, 1);
rt->u.dst.child = NULL;
@@ -1280,7 +1396,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rt->u.dst.path = &rt->u.dst;
rt->u.dst.neighbour = NULL;
rt->u.dst.hh = NULL;
+#ifdef CONFIG_XFRM
rt->u.dst.xfrm = NULL;
+#endif
rt->rt_genid = rt_genid(net);
rt->rt_flags |= RTCF_REDIRECTED;
@@ -1324,11 +1442,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- printk(KERN_INFO "Redirect from " NIPQUAD_FMT " on %s about "
- NIPQUAD_FMT " ignored.\n"
- " Advised path = " NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
- NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
- NIPQUAD(saddr), NIPQUAD(daddr));
+ printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
+ " Advised path = %pI4 -> %pI4\n",
+ &old_gw, dev->name, &new_gw,
+ &saddr, &daddr);
#endif
in_dev_put(in_dev);
}
@@ -1348,9 +1465,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
rt->fl.oif,
rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
- printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
- NIPQUAD_FMT "/%02x dropped\n",
- NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
+ printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
+ &rt->rt_dst, rt->fl.fl4_tos);
#endif
rt_del(hash, rt);
ret = NULL;
@@ -1414,10 +1530,9 @@ void ip_rt_send_redirect(struct sk_buff *skb)
if (IN_DEV_LOG_MARTIANS(in_dev) &&
rt->u.dst.rate_tokens == ip_rt_redirect_number &&
net_ratelimit())
- printk(KERN_WARNING "host " NIPQUAD_FMT "/if%d ignores "
- "redirects for " NIPQUAD_FMT " to " NIPQUAD_FMT ".\n",
- NIPQUAD(rt->rt_src), rt->rt_iif,
- NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
+ printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
+ &rt->rt_src, rt->rt_iif,
+ &rt->rt_dst, &rt->rt_gateway);
#endif
}
out:
@@ -1610,8 +1725,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
static int ip_rt_bug(struct sk_buff *skb)
{
- printk(KERN_DEBUG "ip_rt_bug: " NIPQUAD_FMT " -> " NIPQUAD_FMT ", %s\n",
- NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
+ printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
+ &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
skb->dev ? skb->dev->name : "?");
kfree_skb(skb);
return 0;
@@ -1788,9 +1903,8 @@ static void ip_handle_martian_source(struct net_device *dev,
* RFC1812 recommendation, if source is martian,
* the only hint is MAC header.
*/
- printk(KERN_WARNING "martian source " NIPQUAD_FMT " from "
- NIPQUAD_FMT", on dev %s\n",
- NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
+ printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
+ &daddr, &saddr, dev->name);
if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
int i;
const unsigned char *p = skb_mac_header(skb);
@@ -2099,9 +2213,8 @@ martian_destination:
RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- printk(KERN_WARNING "martian destination " NIPQUAD_FMT " from "
- NIPQUAD_FMT ", dev %s\n",
- NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
+ printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
+ &daddr, &saddr, dev->name);
#endif
e_hostunreach:
@@ -2130,6 +2243,10 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct net *net;
net = dev_net(dev);
+
+ if (!rt_caching(net))
+ goto skip_cache;
+
tos &= IPTOS_RT_MASK;
hash = rt_hash(daddr, saddr, iif, rt_genid(net));
@@ -2154,6 +2271,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
}
rcu_read_unlock();
+skip_cache:
/* Multicast recognition logic is moved from route cache to here.
The problem was that too many Ethernet cards have broken/missing
hardware multicast filters :-( As result the host on multicasting
@@ -2539,6 +2657,9 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
unsigned hash;
struct rtable *rth;
+ if (!rt_caching(net))
+ goto slow_output;
+
hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
rcu_read_lock_bh();
@@ -2563,6 +2684,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
}
rcu_read_unlock_bh();
+slow_output:
return ip_route_output_slow(net, rp, flp);
}
@@ -2578,7 +2700,6 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
.destroy = ipv4_dst_destroy,
.check = ipv4_dst_check,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
- .entry_size = sizeof(struct rtable),
.entries = ATOMIC_INIT(0),
};
@@ -2640,7 +2761,7 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
flp->fl4_src = (*rp)->rt_src;
if (!flp->fl4_dst)
flp->fl4_dst = (*rp)->rt_dst;
- err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
+ err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
flags ? XFRM_LOOKUP_WAIT : 0);
if (err == -EREMOTE)
err = ipv4_dst_blackhole(net, rp, flp);
@@ -2995,7 +3116,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ipv4_dst_ops.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_MAX_SIZE,
@@ -3003,7 +3124,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_max_size,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
/* Deprecated. Use gc_min_interval_ms */
@@ -3013,8 +3134,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
@@ -3022,8 +3143,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_ms_jiffies,
- .strategy = &sysctl_ms_jiffies,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ .strategy = sysctl_ms_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_TIMEOUT,
@@ -3031,8 +3152,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_gc_timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_INTERVAL,
@@ -3040,8 +3161,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_gc_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_REDIRECT_LOAD,
@@ -3049,7 +3170,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_redirect_load,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_REDIRECT_NUMBER,
@@ -3057,7 +3178,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_redirect_number,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_REDIRECT_SILENCE,
@@ -3065,7 +3186,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_redirect_silence,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_ERROR_COST,
@@ -3073,7 +3194,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_error_cost,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_ERROR_BURST,
@@ -3081,7 +3202,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_error_burst,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_ELASTICITY,
@@ -3089,7 +3210,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_gc_elasticity,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_MTU_EXPIRES,
@@ -3097,8 +3218,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_mtu_expires,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_MIN_PMTU,
@@ -3106,7 +3227,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_min_pmtu,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_MIN_ADVMSS,
@@ -3114,7 +3235,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_min_advmss,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_SECRET_INTERVAL,
@@ -3122,8 +3243,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_secret_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &ipv4_sysctl_rt_secret_interval,
- .strategy = &ipv4_sysctl_rt_secret_interval_strategy,
+ .proc_handler = ipv4_sysctl_rt_secret_interval,
+ .strategy = ipv4_sysctl_rt_secret_interval_strategy,
},
{ .ctl_name = 0 }
};
@@ -3151,8 +3272,8 @@ static struct ctl_table ipv4_route_flush_table[] = {
.procname = "flush",
.maxlen = sizeof(int),
.mode = 0200,
- .proc_handler = &ipv4_sysctl_rtcache_flush,
- .strategy = &ipv4_sysctl_rtcache_flush_strategy,
+ .proc_handler = ipv4_sysctl_rtcache_flush,
+ .strategy = ipv4_sysctl_rtcache_flush_strategy,
},
{ .ctl_name = 0 },
};
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1bb10df8ce7..4710d219f06 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -195,7 +195,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_timestamps,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_TCP_WINDOW_SCALING,
@@ -203,7 +203,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_window_scaling,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_TCP_SACK,
@@ -211,7 +211,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_sack,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_TCP_RETRANS_COLLAPSE,
@@ -219,7 +219,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_retrans_collapse,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_DEFAULT_TTL,
@@ -227,8 +227,8 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_ip_default_ttl,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &ipv4_doint_and_flush,
- .strategy = &ipv4_doint_and_flush_strategy,
+ .proc_handler = ipv4_doint_and_flush,
+ .strategy = ipv4_doint_and_flush_strategy,
.extra2 = &init_net,
},
{
@@ -237,7 +237,7 @@ static struct ctl_table ipv4_table[] = {
.data = &ipv4_config.no_pmtu_disc,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_NONLOCAL_BIND,
@@ -245,7 +245,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_ip_nonlocal_bind,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_TCP_SYN_RETRIES,
@@ -253,7 +253,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_syn_retries,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_SYNACK_RETRIES,
@@ -261,7 +261,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_synack_retries,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_MAX_ORPHANS,
@@ -269,7 +269,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_max_orphans,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_MAX_TW_BUCKETS,
@@ -277,7 +277,7 @@ static struct ctl_table ipv4_table[] = {
.data = &tcp_death_row.sysctl_max_tw_buckets,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_DYNADDR,
@@ -285,7 +285,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_ip_dynaddr,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_TCP_KEEPALIVE_TIME,
@@ -293,8 +293,8 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_keepalive_time,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies
},
{
.ctl_name = NET_IPV4_TCP_KEEPALIVE_PROBES,
@@ -302,7 +302,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_keepalive_probes,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_TCP_KEEPALIVE_INTVL,
@@ -310,8 +310,8 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_keepalive_intvl,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies
},
{
.ctl_name = NET_IPV4_TCP_RETRIES1,
@@ -319,8 +319,8 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_retries1,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
+ .strategy = sysctl_intvec,
.extra2 = &tcp_retr1_max
},
{
@@ -329,7 +329,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_retries2,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_TCP_FIN_TIMEOUT,
@@ -337,8 +337,8 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_fin_timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies
},
#ifdef CONFIG_SYN_COOKIES
{
@@ -347,7 +347,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_syncookies,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
#endif
{
@@ -356,7 +356,7 @@ static struct ctl_table ipv4_table[] = {
.data = &tcp_death_row.sysctl_tw_recycle,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_ABORT_ON_OVERFLOW,
@@ -364,7 +364,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_abort_on_overflow,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_STDURG,
@@ -372,7 +372,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_stdurg,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_RFC1337,
@@ -380,7 +380,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_rfc1337,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_MAX_SYN_BACKLOG,
@@ -388,7 +388,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_max_syn_backlog,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_LOCAL_PORT_RANGE,
@@ -396,8 +396,8 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_local_ports.range,
.maxlen = sizeof(sysctl_local_ports.range),
.mode = 0644,
- .proc_handler = &ipv4_local_port_range,
- .strategy = &ipv4_sysctl_local_port_range,
+ .proc_handler = ipv4_local_port_range,
+ .strategy = ipv4_sysctl_local_port_range,
},
#ifdef CONFIG_IP_MULTICAST
{
@@ -406,7 +406,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_igmp_max_memberships,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
#endif
@@ -416,7 +416,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_igmp_max_msf,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_INET_PEER_THRESHOLD,
@@ -424,7 +424,7 @@ static struct ctl_table ipv4_table[] = {
.data = &inet_peer_threshold,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_INET_PEER_MINTTL,
@@ -432,8 +432,8 @@ static struct ctl_table ipv4_table[] = {
.data = &inet_peer_minttl,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies
},
{
.ctl_name = NET_IPV4_INET_PEER_MAXTTL,
@@ -441,8 +441,8 @@ static struct ctl_table ipv4_table[] = {
.data = &inet_peer_maxttl,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies
},
{
.ctl_name = NET_IPV4_INET_PEER_GC_MINTIME,
@@ -450,8 +450,8 @@ static struct ctl_table ipv4_table[] = {
.data = &inet_peer_gc_mintime,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies
},
{
.ctl_name = NET_IPV4_INET_PEER_GC_MAXTIME,
@@ -459,8 +459,8 @@ static struct ctl_table ipv4_table[] = {
.data = &inet_peer_gc_maxtime,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies
},
{
.ctl_name = NET_TCP_ORPHAN_RETRIES,
@@ -468,7 +468,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_orphan_retries,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_FACK,
@@ -476,7 +476,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_fack,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_REORDERING,
@@ -484,7 +484,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_reordering,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_ECN,
@@ -492,7 +492,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_ecn,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_DSACK,
@@ -500,7 +500,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_dsack,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_MEM,
@@ -508,7 +508,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_mem,
.maxlen = sizeof(sysctl_tcp_mem),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_WMEM,
@@ -516,7 +516,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_wmem,
.maxlen = sizeof(sysctl_tcp_wmem),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_RMEM,
@@ -524,7 +524,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_rmem,
.maxlen = sizeof(sysctl_tcp_rmem),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_APP_WIN,
@@ -532,7 +532,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_app_win,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_ADV_WIN_SCALE,
@@ -540,7 +540,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_adv_win_scale,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_TW_REUSE,
@@ -548,7 +548,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_tw_reuse,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_FRTO,
@@ -556,7 +556,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_frto,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_FRTO_RESPONSE,
@@ -564,7 +564,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_frto_response,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_LOW_LATENCY,
@@ -572,7 +572,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_low_latency,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_TCP_NO_METRICS_SAVE,
@@ -580,7 +580,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_nometrics_save,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_TCP_MODERATE_RCVBUF,
@@ -588,7 +588,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_moderate_rcvbuf,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_TCP_TSO_WIN_DIVISOR,
@@ -596,15 +596,15 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_tso_win_divisor,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_TCP_CONG_CONTROL,
.procname = "tcp_congestion_control",
.mode = 0644,
.maxlen = TCP_CA_NAME_MAX,
- .proc_handler = &proc_tcp_congestion_control,
- .strategy = &sysctl_tcp_congestion_control,
+ .proc_handler = proc_tcp_congestion_control,
+ .strategy = sysctl_tcp_congestion_control,
},
{
.ctl_name = NET_TCP_ABC,
@@ -612,7 +612,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_abc,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_TCP_MTU_PROBING,
@@ -620,7 +620,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_mtu_probing,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_TCP_BASE_MSS,
@@ -628,7 +628,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_base_mss,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS,
@@ -636,7 +636,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_workaround_signed_windows,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
#ifdef CONFIG_NET_DMA
{
@@ -645,7 +645,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_dma_copybreak,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
#endif
{
@@ -654,7 +654,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_slow_start_after_idle,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
#ifdef CONFIG_NETLABEL
{
@@ -663,7 +663,7 @@ static struct ctl_table ipv4_table[] = {
.data = &cipso_v4_cache_enabled,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_CIPSOV4_CACHE_BUCKET_SIZE,
@@ -671,7 +671,7 @@ static struct ctl_table ipv4_table[] = {
.data = &cipso_v4_cache_bucketsize,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_CIPSOV4_RBM_OPTFMT,
@@ -679,7 +679,7 @@ static struct ctl_table ipv4_table[] = {
.data = &cipso_v4_rbm_optfmt,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_CIPSOV4_RBM_STRICTVALID,
@@ -687,22 +687,22 @@ static struct ctl_table ipv4_table[] = {
.data = &cipso_v4_rbm_strictvalid,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif /* CONFIG_NETLABEL */
{
.procname = "tcp_available_congestion_control",
.maxlen = TCP_CA_BUF_MAX,
.mode = 0444,
- .proc_handler = &proc_tcp_available_congestion_control,
+ .proc_handler = proc_tcp_available_congestion_control,
},
{
.ctl_name = NET_TCP_ALLOWED_CONG_CONTROL,
.procname = "tcp_allowed_congestion_control",
.maxlen = TCP_CA_BUF_MAX,
.mode = 0644,
- .proc_handler = &proc_allowed_congestion_control,
- .strategy = &strategy_allowed_congestion_control,
+ .proc_handler = proc_allowed_congestion_control,
+ .strategy = strategy_allowed_congestion_control,
},
{
.ctl_name = NET_TCP_MAX_SSTHRESH,
@@ -710,7 +710,7 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_max_ssthresh,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = CTL_UNNUMBERED,
@@ -718,8 +718,8 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
+ .strategy = sysctl_intvec,
.extra1 = &zero
},
{
@@ -728,8 +728,8 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_udp_rmem_min,
.maxlen = sizeof(sysctl_udp_rmem_min),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
+ .strategy = sysctl_intvec,
.extra1 = &zero
},
{
@@ -738,8 +738,8 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_udp_wmem_min,
.maxlen = sizeof(sysctl_udp_wmem_min),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
+ .strategy = sysctl_intvec,
.extra1 = &zero
},
{ .ctl_name = 0 }
@@ -752,7 +752,7 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_icmp_echo_ignore_all,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS,
@@ -760,7 +760,7 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_icmp_echo_ignore_broadcasts,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES,
@@ -768,7 +768,7 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR,
@@ -776,7 +776,7 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_icmp_errors_use_inbound_ifaddr,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
.ctl_name = NET_IPV4_ICMP_RATELIMIT,
@@ -784,8 +784,8 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_icmp_ratelimit,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_ms_jiffies,
- .strategy = &sysctl_ms_jiffies
+ .proc_handler = proc_dointvec_ms_jiffies,
+ .strategy = sysctl_ms_jiffies
},
{
.ctl_name = NET_IPV4_ICMP_RATEMASK,
@@ -793,7 +793,15 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_icmp_ratemask,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "rt_cache_rebuild_count",
+ .data = &init_net.ipv4.sysctl_rt_cache_rebuild_count,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
},
{ }
};
@@ -827,8 +835,12 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
&net->ipv4.sysctl_icmp_ratelimit;
table[5].data =
&net->ipv4.sysctl_icmp_ratemask;
+ table[6].data =
+ &net->ipv4.sysctl_rt_cache_rebuild_count;
}
+ net->ipv4.sysctl_rt_cache_rebuild_count = 4;
+
net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
net_ipv4_ctl_path, table);
if (net->ipv4.ipv4_hdr == NULL)
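
The new knob is per network namespace: the template entry points at init_net, ipv4_sysctl_init_net() repoints table[6].data at each namespace's own field, and the default is 4 rebuilds before rt_caching() gives up on the route cache. Reading the current value from userspace goes through the registered /proc/sys path; a small sketch, assuming the procname shown above:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/rt_cache_rebuild_count", "r");
            int count;

            if (f && fscanf(f, "%d", &count) == 1)
                    printf("rt_cache_rebuild_count = %d\n", count);
            if (f)
                    fclose(f);
            return 0;
    }
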
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c5aca0bb116..1f3d52946b3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -277,8 +277,7 @@
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
-atomic_t tcp_orphan_count = ATOMIC_INIT(0);
-
+struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
int sysctl_tcp_mem[3] __read_mostly;
@@ -290,9 +289,12 @@ EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);
atomic_t tcp_memory_allocated; /* Current allocated memory. */
-atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
-
EXPORT_SYMBOL(tcp_memory_allocated);
+
+/*
+ * Current number of TCP sockets.
+ */
+struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);
/*
@@ -1680,7 +1682,7 @@ void tcp_set_state(struct sock *sk, int state)
inet_put_port(sk);
/* fall through */
default:
- if (oldstate==TCP_ESTABLISHED)
+ if (oldstate == TCP_ESTABLISHED)
TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
}
@@ -1690,7 +1692,7 @@ void tcp_set_state(struct sock *sk, int state)
sk->sk_state = state;
#ifdef STATE_TRACE
- SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
+ SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}
EXPORT_SYMBOL_GPL(tcp_set_state);
@@ -1834,7 +1836,7 @@ adjudge_to_death:
state = sk->sk_state;
sock_hold(sk);
sock_orphan(sk);
- atomic_inc(sk->sk_prot->orphan_count);
+ percpu_counter_inc(sk->sk_prot->orphan_count);
/* It is the last release_sock in its life. It will remove backlog. */
release_sock(sk);
@@ -1885,9 +1887,11 @@ adjudge_to_death:
}
}
if (sk->sk_state != TCP_CLOSE) {
+ int orphan_count = percpu_counter_read_positive(
+ sk->sk_prot->orphan_count);
+
sk_mem_reclaim(sk);
- if (tcp_too_many_orphans(sk,
- atomic_read(sk->sk_prot->orphan_count))) {
+ if (tcp_too_many_orphans(sk, orphan_count)) {
if (net_ratelimit())
printk(KERN_INFO "TCP: too many of orphaned "
"sockets\n");
@@ -2461,6 +2465,106 @@ out:
}
EXPORT_SYMBOL(tcp_tso_segment);
+struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+ struct sk_buff **pp = NULL;
+ struct sk_buff *p;
+ struct tcphdr *th;
+ struct tcphdr *th2;
+ unsigned int thlen;
+ unsigned int flags;
+ unsigned int total;
+ unsigned int mss = 1;
+ int flush = 1;
+
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ goto out;
+
+ th = tcp_hdr(skb);
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ goto out;
+
+ if (!pskb_may_pull(skb, thlen))
+ goto out;
+
+ th = tcp_hdr(skb);
+ __skb_pull(skb, thlen);
+
+ flags = tcp_flag_word(th);
+
+ for (; (p = *head); head = &p->next) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ th2 = tcp_hdr(p);
+
+ if (th->source != th2->source || th->dest != th2->dest) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ goto found;
+ }
+
+ goto out_check_final;
+
+found:
+ flush = NAPI_GRO_CB(p)->flush;
+ flush |= flags & TCP_FLAG_CWR;
+ flush |= (flags ^ tcp_flag_word(th2)) &
+ ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
+ flush |= th->ack_seq != th2->ack_seq || th->window != th2->window;
+ flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
+
+ total = p->len;
+ mss = total;
+ if (skb_shinfo(p)->frag_list)
+ mss = skb_shinfo(p)->frag_list->len;
+
+ flush |= skb->len > mss || skb->len <= 0;
+ flush |= ntohl(th2->seq) + total != ntohl(th->seq);
+
+ if (flush || skb_gro_receive(head, skb)) {
+ mss = 1;
+ goto out_check_final;
+ }
+
+ p = *head;
+ th2 = tcp_hdr(p);
+ tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
+
+out_check_final:
+ flush = skb->len < mss;
+ flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
+ TCP_FLAG_SYN | TCP_FLAG_FIN);
+
+ if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
+ pp = head;
+
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+int tcp_gro_complete(struct sk_buff *skb)
+{
+ struct tcphdr *th = tcp_hdr(skb);
+
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ skb_shinfo(skb)->gso_size = skb_shinfo(skb)->frag_list->len;
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+ if (th->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+ return 0;
+}
+
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
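
tcp_gro_receive() only coalesces a segment with a held one when the ports match, the ACK number, window, and all flag bits outside CWR/FIN/PSH plus the options are identical, and the new data starts exactly where the held payload ends; any mismatch raises flush, pushing the held packet up unmerged. tcp_gro_complete() then rebuilds the GSO metadata so the merged super-packet can be resegmented on output. A reduced sketch of the two cheapest checks (hypothetical segment struct, not sk_buff):

    #include <stdint.h>
    #include <stdbool.h>
    #include <arpa/inet.h>

    struct seg {
            uint16_t source, dest;  /* network order, as in struct tcphdr */
            uint32_t seq;           /* network order */
            unsigned int len;       /* payload bytes already merged */
    };

    /* can `skb` extend `held` the way tcp_gro_receive() requires? */
    static bool can_merge(const struct seg *held, const struct seg *skb)
    {
            if (skb->source != held->source || skb->dest != held->dest)
                    return false;           /* different flow */
            /* data must continue exactly after the held payload */
            return ntohl(skb->seq) == ntohl(held->seq) + held->len;
    }
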
@@ -2650,7 +2754,7 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
void tcp_done(struct sock *sk)
{
- if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+ if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
tcp_set_state(sk, TCP_CLOSE);
@@ -2685,6 +2789,8 @@ void __init tcp_init(void)
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
+ percpu_counter_init(&tcp_sockets_allocated, 0);
+ percpu_counter_init(&tcp_orphan_count, 0);
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
@@ -2707,8 +2813,8 @@ void __init tcp_init(void)
thash_entries ? 0 : 512 * 1024);
tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
- INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
- INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
+ INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
+ INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
}
if (inet_ehash_locks_alloc(&tcp_hashinfo))
panic("TCP: failed to alloc ehash_locks");
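
The established-hash chains move to hlist_nulls, whose terminator is not NULL but an encoded value (here the bucket index i). An RCU reader traversing locklessly can check, on hitting the terminator, that it still names the chain the lookup started on, and restart otherwise; that is what lets entries be recycled SLAB_DESTROY_BY_RCU-style without a grace period per lookup. A sketch of the tagging (the kernel sets the low bit and keeps the value in the remaining bits):

    #include <stdbool.h>
    #include <stdint.h>

    /* a "nulls" terminator: low bit set, chain index in the upper bits */
    static inline void *make_nulls(unsigned long chain)
    {
            return (void *)((chain << 1) | 1UL);
    }

    static inline bool is_nulls(const void *p)
    {
            return ((uintptr_t)p & 1UL) != 0;
    }

    static inline unsigned long nulls_value(const void *p)
    {
            return (uintptr_t)p >> 1;
    }

    /*
     * Reader pattern: if the terminator names a different chain than
     * the one the walk started on, the entry was moved under us and
     * the lookup must be retried.
     */
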
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 4a1221e5e8e..ee467ec40c4 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -1,13 +1,23 @@
/*
- * TCP CUBIC: Binary Increase Congestion control for TCP v2.2
+ * TCP CUBIC: Binary Increase Congestion control for TCP v2.3
* Home page:
* http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
* This is from the implementation of CUBIC TCP in
- * Injong Rhee, Lisong Xu.
- * "CUBIC: A New TCP-Friendly High-Speed TCP Variant
- * in PFLDnet 2005
+ * Sangtae Ha, Injong Rhee and Lisong Xu,
+ * "CUBIC: A New TCP-Friendly High-Speed TCP Variant"
+ * in ACM SIGOPS Operating System Review, July 2008.
* Available from:
- * http://netsrv.csc.ncsu.edu/export/cubic-paper.pdf
+ * http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf
+ *
+ * CUBIC integrates a new slow start algorithm, called HyStart.
+ * The details of HyStart are presented in
+ * Sangtae Ha and Injong Rhee,
+ * "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008.
+ * Available from:
+ * http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf
+ *
+ * All testing results are available from:
+ * http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing
*
* Unless CUBIC is enabled and congestion window is large
* this behaves the same as the original Reno.
@@ -23,12 +33,26 @@
*/
#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */
+/* Two methods of hybrid slow start */
+#define HYSTART_ACK_TRAIN 0x1
+#define HYSTART_DELAY 0x2
+
+/* Number of delay samples for detecting the increase of delay */
+#define HYSTART_MIN_SAMPLES 8
+#define HYSTART_DELAY_MIN (2U<<3)
+#define HYSTART_DELAY_MAX (16U<<3)
+#define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
+
static int fast_convergence __read_mostly = 1;
static int beta __read_mostly = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh __read_mostly;
static int bic_scale __read_mostly = 41;
static int tcp_friendliness __read_mostly = 1;
+static int hystart __read_mostly = 1;
+static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY;
+static int hystart_low_window __read_mostly = 16;
+
static u32 cube_rtt_scale __read_mostly;
static u32 beta_scale __read_mostly;
static u64 cube_factor __read_mostly;
@@ -44,6 +68,13 @@ module_param(bic_scale, int, 0444);
MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)");
module_param(tcp_friendliness, int, 0644);
MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness");
+module_param(hystart, int, 0644);
+MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm");
+module_param(hystart_detect, int, 0644);
+MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms"
+ " 1: packet-train 2: delay 3: both packet-train and delay");
+module_param(hystart_low_window, int, 0644);
+MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
/* BIC TCP Parameters */
struct bictcp {
@@ -59,7 +90,13 @@ struct bictcp {
u32 ack_cnt; /* number of acks */
u32 tcp_cwnd; /* estimated tcp cwnd */
#define ACK_RATIO_SHIFT 4
- u32 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */
+ u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */
+ u8 sample_cnt; /* number of samples to decide curr_rtt */
+ u8 found; /* the exit point is found? */
+ u32 round_start; /* beginning of each round */
+ u32 end_seq; /* end_seq of the round */
+ u32 last_jiffies; /* last time when the ACK spacing is close */
+ u32 curr_rtt; /* the minimum rtt of current round */
};
static inline void bictcp_reset(struct bictcp *ca)
@@ -76,12 +113,28 @@ static inline void bictcp_reset(struct bictcp *ca)
ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
ca->ack_cnt = 0;
ca->tcp_cwnd = 0;
+ ca->found = 0;
+}
+
+static inline void bictcp_hystart_reset(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bictcp *ca = inet_csk_ca(sk);
+
+ ca->round_start = ca->last_jiffies = jiffies;
+ ca->end_seq = tp->snd_nxt;
+ ca->curr_rtt = 0;
+ ca->sample_cnt = 0;
}
static void bictcp_init(struct sock *sk)
{
bictcp_reset(inet_csk_ca(sk));
- if (initial_ssthresh)
+
+ if (hystart)
+ bictcp_hystart_reset(sk);
+
+ if (!hystart && initial_ssthresh)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
@@ -235,9 +288,11 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
if (!tcp_is_cwnd_limited(sk, in_flight))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ if (hystart && after(ack, ca->end_seq))
+ bictcp_hystart_reset(sk);
tcp_slow_start(tp);
- else {
+ } else {
bictcp_update(ca, tp->snd_cwnd);
/* In dangerous area, increase slowly.
@@ -281,8 +336,45 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
static void bictcp_state(struct sock *sk, u8 new_state)
{
- if (new_state == TCP_CA_Loss)
+ if (new_state == TCP_CA_Loss) {
bictcp_reset(inet_csk_ca(sk));
+ bictcp_hystart_reset(sk);
+ }
+}
+
+static void hystart_update(struct sock *sk, u32 delay)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bictcp *ca = inet_csk_ca(sk);
+
+ if (!(ca->found & hystart_detect)) {
+ u32 curr_jiffies = jiffies;
+
+ /* first detection parameter - ack-train detection */
+ if (curr_jiffies - ca->last_jiffies <= msecs_to_jiffies(2)) {
+ ca->last_jiffies = curr_jiffies;
+ if (curr_jiffies - ca->round_start >= ca->delay_min>>4)
+ ca->found |= HYSTART_ACK_TRAIN;
+ }
+
+	/* obtain the minimum delay over at least HYSTART_MIN_SAMPLES packets */
+ if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
+ if (ca->curr_rtt == 0 || ca->curr_rtt > delay)
+ ca->curr_rtt = delay;
+
+ ca->sample_cnt++;
+ } else {
+ if (ca->curr_rtt > ca->delay_min +
+ HYSTART_DELAY_THRESH(ca->delay_min>>4))
+ ca->found |= HYSTART_DELAY;
+ }
+ /*
+	 * If either one of the two conditions is met,
+ * we exit from slow start immediately.
+ */
+ if (ca->found & hystart_detect)
+ tp->snd_ssthresh = tp->snd_cwnd;
+ }
}
/* Track delayed acknowledgment ratio using sliding window
@@ -291,6 +383,7 @@ static void bictcp_state(struct sock *sk, u8 new_state)
static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
u32 delay;
@@ -314,6 +407,11 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
/* first time call or link delay decreases */
if (ca->delay_min == 0 || ca->delay_min > delay)
ca->delay_min = delay;
+
+	/* hystart triggers when cwnd reaches hystart_low_window during slow start */
+ if (hystart && tp->snd_cwnd <= tp->snd_ssthresh &&
+ tp->snd_cwnd >= hystart_low_window)
+ hystart_update(sk, delay);
}
static struct tcp_congestion_ops cubictcp = {
@@ -372,4 +470,4 @@ module_exit(cubictcp_unregister);
MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CUBIC TCP");
-MODULE_VERSION("2.2");
+MODULE_VERSION("2.3");
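
For reference, the two HyStart exit tests added above can be modeled outside the kernel. Below is a minimal standalone sketch under the patch's conventions (RTT values carried left-shifted by 3, time in plain jiffies); every name here is illustrative rather than kernel API:

#include <stdio.h>

#define HYSTART_DELAY_MIN	(2U << 3)
#define HYSTART_DELAY_MAX	(16U << 3)

static unsigned int clamp_delay(unsigned int x)
{
	if (x < HYSTART_DELAY_MIN)
		return HYSTART_DELAY_MIN;
	if (x > HYSTART_DELAY_MAX)
		return HYSTART_DELAY_MAX;
	return x;
}

/* Delay test: the minimum RTT of the current round exceeds the path
 * minimum by a clamped fraction of it (both values are <<3).
 */
static int hystart_delay_exit(unsigned int delay_min, unsigned int curr_rtt)
{
	return curr_rtt > delay_min + clamp_delay(delay_min >> 4);
}

/* ACK-train test: ACKs spaced no more than 2ms apart form a train;
 * exit once the train spans at least half an RTT (delay_min is <<3,
 * so delay_min >> 4 is RTT/2 in jiffies).
 */
static int hystart_train_exit(unsigned long round_start, unsigned long now,
			      unsigned int delay_min)
{
	return now - round_start >= (delay_min >> 4);
}

int main(void)
{
	/* path minimum of 100 jiffies; round minimum has risen to 120 */
	printf("delay exit: %d\n", hystart_delay_exit(100U << 3, 120U << 3));
	/* a 60-jiffy ACK train against the same 100-jiffy delay_min */
	printf("train exit: %d\n", hystart_train_exit(1000, 1060, 100U << 3));
	return 0;
}

Note how delay_min >> 4 does double duty: with the <<3 scaling it equals half the smallest observed RTT, serving both as the ACK-train length target and as the base of the clamped delay threshold.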
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 838d491dfda..fcbcd4ff6c5 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -34,7 +34,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
tcp_get_info(sk, info);
}
-static struct inet_diag_handler tcp_diag_handler = {
+static const struct inet_diag_handler tcp_diag_handler = {
.idiag_hashinfo = &tcp_hashinfo,
.idiag_get_info = tcp_diag_get_info,
.idiag_type = TCPDIAG_GETSOCK,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d77c0d29e23..99b7ecbe889 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -701,13 +701,10 @@ static inline void tcp_set_rto(struct sock *sk)
 * the whole algorithm is bogus and should be replaced
 * with a correct one, which is exactly what we pretend to do.
*/
-}
-/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
- * guarantees that rto is higher.
- */
-static inline void tcp_bound_rto(struct sock *sk)
-{
+ /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
+ * guarantees that rto is higher.
+ */
if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}
@@ -928,7 +925,6 @@ static void tcp_init_metrics(struct sock *sk)
tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
}
tcp_set_rto(sk);
- tcp_bound_rto(sk);
if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
goto reset;
tp->snd_cwnd = tcp_init_cwnd(tp, dst);
@@ -1002,7 +998,8 @@ static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
}
}
-void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
+static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
+ struct sk_buff *skb)
{
tcp_verify_retransmit_hint(tp, skb);
@@ -1236,31 +1233,58 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
return dup_sack;
}
+struct tcp_sacktag_state {
+ int reord;
+ int fack_count;
+ int flag;
+};
+
/* Check if skb is fully within the SACK block. In presence of GSO skbs,
* the incoming SACK may not exactly match but we can find smaller MSS
* aligned portion of it that matches. Therefore we might need to fragment
 * which may fail and create some hassle (the caller must handle the
 * error-case returns).
+ *
+ * FIXME: this could be merged to shift decision code
*/
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
u32 start_seq, u32 end_seq)
{
int in_sack, err;
unsigned int pkt_len;
+ unsigned int mss;
in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
!before(end_seq, TCP_SKB_CB(skb)->end_seq);
if (tcp_skb_pcount(skb) > 1 && !in_sack &&
after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
-
+ mss = tcp_skb_mss(skb);
in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
- if (!in_sack)
+ if (!in_sack) {
pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
- else
+ if (pkt_len < mss)
+ pkt_len = mss;
+ } else {
pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
- err = tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size);
+ if (pkt_len < mss)
+ return -EINVAL;
+ }
+
+ /* Round if necessary so that SACKs cover only full MSSes
+ * and/or the remaining small portion (if present)
+ */
+ if (pkt_len > mss) {
+ unsigned int new_len = (pkt_len / mss) * mss;
+ if (!in_sack && new_len < pkt_len) {
+ new_len += mss;
+ if (new_len > skb->len)
+ return 0;
+ }
+ pkt_len = new_len;
+ }
+ err = tcp_fragment(sk, skb, pkt_len, mss);
if (err < 0)
return err;
}
@@ -1268,24 +1292,25 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
return in_sack;
}
-static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
- int *reord, int dup_sack, int fack_count)
+static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
+ struct tcp_sacktag_state *state,
+ int dup_sack, int pcount)
{
struct tcp_sock *tp = tcp_sk(sk);
u8 sacked = TCP_SKB_CB(skb)->sacked;
- int flag = 0;
+ int fack_count = state->fack_count;
/* Account D-SACK for retransmitted packet. */
if (dup_sack && (sacked & TCPCB_RETRANS)) {
if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
tp->undo_retrans--;
if (sacked & TCPCB_SACKED_ACKED)
- *reord = min(fack_count, *reord);
+ state->reord = min(fack_count, state->reord);
}
/* Nothing to do; acked frame is about to be dropped (was ACKed). */
if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
- return flag;
+ return sacked;
if (!(sacked & TCPCB_SACKED_ACKED)) {
if (sacked & TCPCB_SACKED_RETRANS) {
@@ -1294,10 +1319,9 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
* that retransmission is still in flight.
*/
if (sacked & TCPCB_LOST) {
- TCP_SKB_CB(skb)->sacked &=
- ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
- tp->lost_out -= tcp_skb_pcount(skb);
- tp->retrans_out -= tcp_skb_pcount(skb);
+ sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+ tp->lost_out -= pcount;
+ tp->retrans_out -= pcount;
}
} else {
if (!(sacked & TCPCB_RETRANS)) {
@@ -1306,56 +1330,280 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
*/
if (before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
- *reord = min(fack_count, *reord);
+ state->reord = min(fack_count,
+ state->reord);
/* SACK enhanced F-RTO (RFC4138; Appendix B) */
if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
- flag |= FLAG_ONLY_ORIG_SACKED;
+ state->flag |= FLAG_ONLY_ORIG_SACKED;
}
if (sacked & TCPCB_LOST) {
- TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
- tp->lost_out -= tcp_skb_pcount(skb);
+ sacked &= ~TCPCB_LOST;
+ tp->lost_out -= pcount;
}
}
- TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
- flag |= FLAG_DATA_SACKED;
- tp->sacked_out += tcp_skb_pcount(skb);
+ sacked |= TCPCB_SACKED_ACKED;
+ state->flag |= FLAG_DATA_SACKED;
+ tp->sacked_out += pcount;
- fack_count += tcp_skb_pcount(skb);
+ fack_count += pcount;
/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
before(TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(tp->lost_skb_hint)->seq))
- tp->lost_cnt_hint += tcp_skb_pcount(skb);
+ tp->lost_cnt_hint += pcount;
if (fack_count > tp->fackets_out)
tp->fackets_out = fack_count;
-
- if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
- tcp_advance_highest_sack(sk, skb);
}
/* D-SACK. We can detect redundant retransmission in S|R and plain R
* frames and clear it. undo_retrans is decreased above, L|R frames
* are accounted above as well.
*/
- if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) {
- TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
- tp->retrans_out -= tcp_skb_pcount(skb);
+ if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
+ sacked &= ~TCPCB_SACKED_RETRANS;
+ tp->retrans_out -= pcount;
}
- return flag;
+ return sacked;
+}
+
+static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ struct tcp_sacktag_state *state,
+ unsigned int pcount, int shifted, int mss)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
+
+ BUG_ON(!pcount);
+
+ /* Tweak before seqno plays */
+ if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
+ !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+ tp->lost_cnt_hint += pcount;
+
+ TCP_SKB_CB(prev)->end_seq += shifted;
+ TCP_SKB_CB(skb)->seq += shifted;
+
+ skb_shinfo(prev)->gso_segs += pcount;
+ BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
+ skb_shinfo(skb)->gso_segs -= pcount;
+
+ /* When we're adding to gso_segs == 1, gso_size will be zero,
+	 * in theory this shouldn't be necessary, but as long as DSACK
+	 * code can come after this skb later on, it's safer to keep
+	 * setting gso_size to something.
+ */
+ if (!skb_shinfo(prev)->gso_size) {
+ skb_shinfo(prev)->gso_size = mss;
+ skb_shinfo(prev)->gso_type = sk->sk_gso_type;
+ }
+
+ /* CHECKME: To clear or not to clear? Mimics normal skb currently */
+ if (skb_shinfo(skb)->gso_segs <= 1) {
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_type = 0;
+ }
+
+ /* We discard results */
+ tcp_sacktag_one(skb, sk, state, 0, pcount);
+
+ /* Difference in this won't matter, both ACKed by the same cumul. ACK */
+ TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
+
+ if (skb->len > 0) {
+ BUG_ON(!tcp_skb_pcount(skb));
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
+ return 0;
+ }
+
+ /* Whole SKB was eaten :-) */
+
+ if (skb == tp->retransmit_skb_hint)
+ tp->retransmit_skb_hint = prev;
+ if (skb == tp->scoreboard_skb_hint)
+ tp->scoreboard_skb_hint = prev;
+ if (skb == tp->lost_skb_hint) {
+ tp->lost_skb_hint = prev;
+ tp->lost_cnt_hint -= tcp_skb_pcount(prev);
+ }
+
+ TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(prev)->flags;
+ if (skb == tcp_highest_sack(sk))
+ tcp_advance_highest_sack(sk, skb);
+
+ tcp_unlink_write_queue(skb, sk);
+ sk_wmem_free_skb(sk, skb);
+
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
+
+ return 1;
+}
+
+/* I wish gso_size had a saner initialization than
+ * something-or-zero, which complicates things.
+ */
+static int tcp_skb_seglen(struct sk_buff *skb)
+{
+ return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
+}
+
+/* Shifting pages past head area doesn't work */
+static int skb_can_shift(struct sk_buff *skb)
+{
+ return !skb_headlen(skb) && skb_is_nonlinear(skb);
+}
+
+/* Try collapsing SACK blocks spanning across multiple skbs to a single
+ * skb.
+ */
+static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ struct tcp_sacktag_state *state,
+ u32 start_seq, u32 end_seq,
+ int dup_sack)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *prev;
+ int mss;
+ int pcount = 0;
+ int len;
+ int in_sack;
+
+ if (!sk_can_gso(sk))
+ goto fallback;
+
+ /* Normally R but no L won't result in plain S */
+ if (!dup_sack &&
+ (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
+ goto fallback;
+ if (!skb_can_shift(skb))
+ goto fallback;
+ /* This frame is about to be dropped (was ACKed). */
+ if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+ goto fallback;
+
+ /* Can only happen with delayed DSACK + discard craziness */
+ if (unlikely(skb == tcp_write_queue_head(sk)))
+ goto fallback;
+ prev = tcp_write_queue_prev(sk, skb);
+
+ if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
+ goto fallback;
+
+ in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
+ !before(end_seq, TCP_SKB_CB(skb)->end_seq);
+
+ if (in_sack) {
+ len = skb->len;
+ pcount = tcp_skb_pcount(skb);
+ mss = tcp_skb_seglen(skb);
+
+		/* TODO: Fix DSACKs not to fragment already-SACKed data, so we can
+ * drop this restriction as unnecessary
+ */
+ if (mss != tcp_skb_seglen(prev))
+ goto fallback;
+ } else {
+ if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
+ goto noop;
+		/* CHECKME: Is this the non-MSS split case only? Btw, this will
+		 * cause skipped skbs due to the advancing loop; the original
+		 * has that feature too
+ */
+ if (tcp_skb_pcount(skb) <= 1)
+ goto noop;
+
+ in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
+ if (!in_sack) {
+ /* TODO: head merge to next could be attempted here
+ * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
+			 * though it might not be worth the additional hassle
+			 *
+			 * ...we can probably just fall back to what was done
+			 * previously. We could try merging non-SACKed ones
+			 * as well, but it probably isn't going to pay off
+			 * because later SACKs might again split them, and
+			 * it would make skb timestamp tracking a considerably
+			 * harder problem.
+ */
+ goto fallback;
+ }
+
+ len = end_seq - TCP_SKB_CB(skb)->seq;
+ BUG_ON(len < 0);
+ BUG_ON(len > skb->len);
+
+ /* MSS boundaries should be honoured or else pcount will
+	 * severely break even though it makes things a bit trickier.
+	 * Optimize the common case to avoid most of the divides.
+ */
+ mss = tcp_skb_mss(skb);
+
+	/* TODO: Fix DSACKs not to fragment already-SACKed data, so we can
+ * drop this restriction as unnecessary
+ */
+ if (mss != tcp_skb_seglen(prev))
+ goto fallback;
+
+ if (len == mss) {
+ pcount = 1;
+ } else if (len < mss) {
+ goto noop;
+ } else {
+ pcount = len / mss;
+ len = pcount * mss;
+ }
+ }
+
+ if (!skb_shift(prev, skb, len))
+ goto fallback;
+ if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss))
+ goto out;
+
+	/* A filled hole allows collapsing with the next skb as well; this is
+	 * very useful when holes appear on every nth skb.
+ */
+ if (prev == tcp_write_queue_tail(sk))
+ goto out;
+ skb = tcp_write_queue_next(sk, prev);
+
+ if (!skb_can_shift(skb) ||
+ (skb == tcp_send_head(sk)) ||
+ ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
+ (mss != tcp_skb_seglen(skb)))
+ goto out;
+
+ len = skb->len;
+ if (skb_shift(prev, skb, len)) {
+ pcount += tcp_skb_pcount(skb);
+ tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss);
+ }
+
+out:
+ state->fack_count += pcount;
+ return prev;
+
+noop:
+ return skb;
+
+fallback:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
+ return NULL;
}
static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
struct tcp_sack_block *next_dup,
+ struct tcp_sacktag_state *state,
u32 start_seq, u32 end_seq,
- int dup_sack_in, int *fack_count,
- int *reord, int *flag)
+ int dup_sack_in)
{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *tmp;
+
tcp_for_write_queue_from(skb, sk) {
int in_sack = 0;
int dup_sack = dup_sack_in;
@@ -1376,17 +1624,42 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
dup_sack = 1;
}
- if (in_sack <= 0)
- in_sack = tcp_match_skb_to_sack(sk, skb, start_seq,
- end_seq);
+		/* The skb reference here is a bit tricky to get right, since
+		 * shifting can eat and free both this skb and the next, so
+		 * not even the _safe variant of the loop is enough.
+ */
+ if (in_sack <= 0) {
+ tmp = tcp_shift_skb_data(sk, skb, state,
+ start_seq, end_seq, dup_sack);
+ if (tmp != NULL) {
+ if (tmp != skb) {
+ skb = tmp;
+ continue;
+ }
+
+ in_sack = 0;
+ } else {
+ in_sack = tcp_match_skb_to_sack(sk, skb,
+ start_seq,
+ end_seq);
+ }
+ }
+
if (unlikely(in_sack < 0))
break;
- if (in_sack)
- *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack,
- *fack_count);
+ if (in_sack) {
+ TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
+ state,
+ dup_sack,
+ tcp_skb_pcount(skb));
+
+ if (!before(TCP_SKB_CB(skb)->seq,
+ tcp_highest_sack_seq(tp)))
+ tcp_advance_highest_sack(sk, skb);
+ }
- *fack_count += tcp_skb_pcount(skb);
+ state->fack_count += tcp_skb_pcount(skb);
}
return skb;
}
@@ -1395,16 +1668,17 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
* a normal way
*/
static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
- u32 skip_to_seq, int *fack_count)
+ struct tcp_sacktag_state *state,
+ u32 skip_to_seq)
{
tcp_for_write_queue_from(skb, sk) {
if (skb == tcp_send_head(sk))
break;
- if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
+ if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
break;
- *fack_count += tcp_skb_pcount(skb);
+ state->fack_count += tcp_skb_pcount(skb);
}
return skb;
}
@@ -1412,18 +1686,17 @@ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
struct sock *sk,
struct tcp_sack_block *next_dup,
- u32 skip_to_seq,
- int *fack_count, int *reord,
- int *flag)
+ struct tcp_sacktag_state *state,
+ u32 skip_to_seq)
{
if (next_dup == NULL)
return skb;
if (before(next_dup->start_seq, skip_to_seq)) {
- skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
- skb = tcp_sacktag_walk(skb, sk, NULL,
- next_dup->start_seq, next_dup->end_seq,
- 1, fack_count, reord, flag);
+ skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
+ skb = tcp_sacktag_walk(skb, sk, NULL, state,
+ next_dup->start_seq, next_dup->end_seq,
+ 1);
}
return skb;
@@ -1445,16 +1718,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
struct tcp_sack_block sp[TCP_NUM_SACKS];
struct tcp_sack_block *cache;
+ struct tcp_sacktag_state state;
struct sk_buff *skb;
int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
int used_sacks;
- int reord = tp->packets_out;
- int flag = 0;
int found_dup_sack = 0;
- int fack_count;
int i, j;
int first_sack_index;
+ state.flag = 0;
+ state.reord = tp->packets_out;
+
if (!tp->sacked_out) {
if (WARN_ON(tp->fackets_out))
tp->fackets_out = 0;
@@ -1464,7 +1738,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
num_sacks, prior_snd_una);
if (found_dup_sack)
- flag |= FLAG_DSACKING_ACK;
+ state.flag |= FLAG_DSACKING_ACK;
/* Eliminate too old ACKs, but take into
* account more or less fresh ones, they can
@@ -1533,7 +1807,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
}
skb = tcp_write_queue_head(sk);
- fack_count = 0;
+ state.fack_count = 0;
i = 0;
if (!tp->sacked_out) {
@@ -1558,7 +1832,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
/* Event "B" in the comment above. */
if (after(end_seq, tp->high_seq))
- flag |= FLAG_DATA_LOST;
+ state.flag |= FLAG_DATA_LOST;
/* Skip too early cached blocks */
while (tcp_sack_cache_ok(tp, cache) &&
@@ -1571,13 +1845,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
/* Head todo? */
if (before(start_seq, cache->start_seq)) {
- skb = tcp_sacktag_skip(skb, sk, start_seq,
- &fack_count);
+ skb = tcp_sacktag_skip(skb, sk, &state,
+ start_seq);
skb = tcp_sacktag_walk(skb, sk, next_dup,
+ &state,
start_seq,
cache->start_seq,
- dup_sack, &fack_count,
- &reord, &flag);
+ dup_sack);
}
/* Rest of the block already fully processed? */
@@ -1585,9 +1859,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
goto advance_sp;
skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
- cache->end_seq,
- &fack_count, &reord,
- &flag);
+ &state,
+ cache->end_seq);
/* ...tail remains todo... */
if (tcp_highest_sack_seq(tp) == cache->end_seq) {
@@ -1595,13 +1868,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
skb = tcp_highest_sack(sk);
if (skb == NULL)
break;
- fack_count = tp->fackets_out;
+ state.fack_count = tp->fackets_out;
cache++;
goto walk;
}
- skb = tcp_sacktag_skip(skb, sk, cache->end_seq,
- &fack_count);
+ skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
/* Check overlap against next cached too (past this one already) */
cache++;
continue;
@@ -1611,20 +1883,20 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
skb = tcp_highest_sack(sk);
if (skb == NULL)
break;
- fack_count = tp->fackets_out;
+ state.fack_count = tp->fackets_out;
}
- skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count);
+ skb = tcp_sacktag_skip(skb, sk, &state, start_seq);
walk:
- skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,
- dup_sack, &fack_count, &reord, &flag);
+ skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
+ start_seq, end_seq, dup_sack);
advance_sp:
/* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
* due to in-order walk
*/
if (after(end_seq, tp->frto_highmark))
- flag &= ~FLAG_ONLY_ORIG_SACKED;
+ state.flag &= ~FLAG_ONLY_ORIG_SACKED;
i++;
}
@@ -1641,10 +1913,10 @@ advance_sp:
tcp_verify_left_out(tp);
- if ((reord < tp->fackets_out) &&
+ if ((state.reord < tp->fackets_out) &&
((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) &&
(!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
- tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+ tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
out:
@@ -1654,13 +1926,13 @@ out:
WARN_ON((int)tp->retrans_out < 0);
WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
- return flag;
+ return state.flag;
}
/* Limits sacked_out so that sum with lost_out isn't ever larger than
 * packets_out. Returns zero if sacked_out adjustment wasn't necessary.
*/
-int tcp_limit_reno_sacked(struct tcp_sock *tp)
+static int tcp_limit_reno_sacked(struct tcp_sock *tp)
{
u32 holes;
@@ -2336,9 +2608,9 @@ static void DBGUNDO(struct sock *sk, const char *msg)
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_family == AF_INET) {
- printk(KERN_DEBUG "Undo %s " NIPQUAD_FMT "/%u c%u l%u ss%u/%u p%u\n",
+ printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
msg,
- NIPQUAD(inet->daddr), ntohs(inet->dport),
+ &inet->daddr, ntohs(inet->dport),
tp->snd_cwnd, tcp_left_out(tp),
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
@@ -2346,9 +2618,9 @@ static void DBGUNDO(struct sock *sk, const char *msg)
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
- printk(KERN_DEBUG "Undo %s " NIP6_FMT "/%u c%u l%u ss%u/%u p%u\n",
+ printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
msg,
- NIP6(np->daddr), ntohs(inet->dport),
+ &np->daddr, ntohs(inet->dport),
tp->snd_cwnd, tcp_left_out(tp),
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
@@ -2559,6 +2831,56 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
+/* Do a simple retransmit without using the backoff mechanisms in
+ * tcp_timer. This is used for path mtu discovery.
+ * The socket is already locked here.
+ */
+void tcp_simple_retransmit(struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+ unsigned int mss = tcp_current_mss(sk, 0);
+ u32 prior_lost = tp->lost_out;
+
+ tcp_for_write_queue(skb, sk) {
+ if (skb == tcp_send_head(sk))
+ break;
+ if (tcp_skb_seglen(skb) > mss &&
+ !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
+ tp->retrans_out -= tcp_skb_pcount(skb);
+ }
+ tcp_skb_mark_lost_uncond_verify(tp, skb);
+ }
+ }
+
+ tcp_clear_retrans_hints_partial(tp);
+
+ if (prior_lost == tp->lost_out)
+ return;
+
+ if (tcp_is_reno(tp))
+ tcp_limit_reno_sacked(tp);
+
+ tcp_verify_left_out(tp);
+
+ /* Don't muck with the congestion window here.
+	 * The reason is that we do not increase the amount of _data_
+	 * in the network, but the units changed and the effective
+	 * cwnd/ssthresh really are reduced now.
+ */
+ if (icsk->icsk_ca_state != TCP_CA_Loss) {
+ tp->high_seq = tp->snd_nxt;
+ tp->snd_ssthresh = tcp_current_ssthresh(sk);
+ tp->prior_ssthresh = 0;
+ tp->undo_marker = 0;
+ tcp_set_ca_state(sk, TCP_CA_Loss);
+ }
+ tcp_xmit_retransmit_queue(sk);
+}
+
/* Process an event, which can update packets-in-flight not trivially.
* Main goal of this function is to calculate new estimate for left_out,
* taking into account both packets sitting in receiver's buffer and
@@ -2730,6 +3052,13 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
tcp_xmit_retransmit_queue(sk);
}
+static void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
+{
+ tcp_rtt_estimator(sk, seq_rtt);
+ tcp_set_rto(sk);
+ inet_csk(sk)->icsk_backoff = 0;
+}
+
/* Read draft-ietf-tcplw-high-performance before mucking
* with this code. (Supersedes RFC1323)
*/
@@ -2751,11 +3080,8 @@ static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
* in window is lost... Voila. --ANK (010210)
*/
struct tcp_sock *tp = tcp_sk(sk);
- const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
- tcp_rtt_estimator(sk, seq_rtt);
- tcp_set_rto(sk);
- inet_csk(sk)->icsk_backoff = 0;
- tcp_bound_rto(sk);
+
+ tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
}
static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
@@ -2772,10 +3098,7 @@ static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
if (flag & FLAG_RETRANS_DATA_ACKED)
return;
- tcp_rtt_estimator(sk, seq_rtt);
- tcp_set_rto(sk);
- inet_csk(sk)->icsk_backoff = 0;
- tcp_bound_rto(sk);
+ tcp_valid_rtt_meas(sk, seq_rtt);
}
static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
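
The rounding rule added to tcp_match_skb_to_sack() above keeps SACK-driven splits on MSS boundaries so that tcp_skb_pcount() stays meaningful for the new shifting code. A standalone sketch of just that rule, with a hypothetical helper name, the skb reduced to its length, and the kernel's 0 and -EINVAL outcomes collapsed into a single 0:

#include <stdio.h>

static unsigned int round_sack_split(unsigned int pkt_len, unsigned int mss,
				     unsigned int skb_len, int in_sack)
{
	if (pkt_len < mss)
		return in_sack ? 0 : mss;	/* never split below one MSS */
	if (pkt_len > mss) {
		unsigned int new_len = (pkt_len / mss) * mss;

		if (!in_sack && new_len < pkt_len) {
			new_len += mss;		/* round up past start_seq */
			if (new_len > skb_len)
				return 0;	/* would swallow the whole skb */
		}
		pkt_len = new_len;
	}
	return pkt_len;
}

int main(void)
{
	/* a 3000-byte head of a 4431-byte GSO skb, MSS 1448 */
	printf("%u\n", round_sack_split(3000, 1448, 4431, 0));
	return 0;
}

This prints 4344: the head is rounded up to three full segments instead of leaving a split point inside an MSS.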
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5c8fa7f1e32..10172487921 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -97,11 +97,7 @@ struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
}
#endif
-struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
- .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
- .lhash_users = ATOMIC_INIT(0),
- .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
-};
+struct inet_hashinfo tcp_hashinfo;
static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
@@ -492,7 +488,7 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
- csum_partial((char *)th,
+ csum_partial(th,
th->doff << 2,
skb->csum));
}
@@ -726,7 +722,7 @@ static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
th->check = tcp_v4_check(skb->len,
ireq->loc_addr,
ireq->rmt_addr,
- csum_partial((char *)th, skb->len,
+ csum_partial(th, skb->len,
skb->csum));
err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
@@ -1139,10 +1135,9 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
if (net_ratelimit()) {
- printk(KERN_INFO "MD5 Hash failed for "
- "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
- NIPQUAD(iph->saddr), ntohs(th->source),
- NIPQUAD(iph->daddr), ntohs(th->dest),
+ printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
+ &iph->saddr, ntohs(th->source),
+ &iph->daddr, ntohs(th->dest),
genhash ? " tcp_v4_calc_md5_hash failed" : "");
}
return 1;
@@ -1297,10 +1292,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* to destinations, already remembered
* to the moment of synflood.
*/
- LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
- "request from " NIPQUAD_FMT "/%u\n",
- NIPQUAD(saddr),
- ntohs(tcp_hdr(skb)->source));
+ LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
+ &saddr, ntohs(tcp_hdr(skb)->source));
goto drop_and_release;
}
@@ -1804,7 +1797,7 @@ static int tcp_v4_init_sock(struct sock *sk)
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
- atomic_inc(&tcp_sockets_allocated);
+ percpu_counter_inc(&tcp_sockets_allocated);
return 0;
}
@@ -1852,7 +1845,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
sk->sk_sndmsg_page = NULL;
}
- atomic_dec(&tcp_sockets_allocated);
+ percpu_counter_dec(&tcp_sockets_allocated);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
@@ -1860,32 +1853,35 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
-static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
+static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
- return hlist_empty(head) ? NULL :
+ return hlist_nulls_empty(head) ? NULL :
list_entry(head->first, struct inet_timewait_sock, tw_node);
}
static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
- return tw->tw_node.next ?
- hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
+ return !is_a_nulls(tw->tw_node.next) ?
+ hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
static void *listening_get_next(struct seq_file *seq, void *cur)
{
struct inet_connection_sock *icsk;
- struct hlist_node *node;
+ struct hlist_nulls_node *node;
struct sock *sk = cur;
- struct tcp_iter_state* st = seq->private;
+ struct inet_listen_hashbucket *ilb;
+ struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
if (!sk) {
st->bucket = 0;
- sk = sk_head(&tcp_hashinfo.listening_hash[0]);
+ ilb = &tcp_hashinfo.listening_hash[0];
+ spin_lock_bh(&ilb->lock);
+ sk = sk_nulls_head(&ilb->head);
goto get_sk;
}
-
+ ilb = &tcp_hashinfo.listening_hash[st->bucket];
++st->num;
if (st->state == TCP_SEQ_STATE_OPENREQ) {
@@ -1918,7 +1914,7 @@ get_req:
sk = sk_next(sk);
}
get_sk:
- sk_for_each_from(sk, node) {
+ sk_nulls_for_each_from(sk, node) {
if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
cur = sk;
goto out;
@@ -1935,8 +1931,11 @@ start_req:
}
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
}
+ spin_unlock_bh(&ilb->lock);
if (++st->bucket < INET_LHTABLE_SIZE) {
- sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
+ ilb = &tcp_hashinfo.listening_hash[st->bucket];
+ spin_lock_bh(&ilb->lock);
+ sk = sk_nulls_head(&ilb->head);
goto get_sk;
}
cur = NULL;
@@ -1957,28 +1956,28 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
static inline int empty_bucket(struct tcp_iter_state *st)
{
- return hlist_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
- hlist_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
+ return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
+ hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
static void *established_get_first(struct seq_file *seq)
{
- struct tcp_iter_state* st = seq->private;
+ struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
void *rc = NULL;
for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
struct sock *sk;
- struct hlist_node *node;
+ struct hlist_nulls_node *node;
struct inet_timewait_sock *tw;
- rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
/* Lockless fast path for the common case of empty buckets */
if (empty_bucket(st))
continue;
- read_lock_bh(lock);
- sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
if (sk->sk_family != st->family ||
!net_eq(sock_net(sk), net)) {
continue;
@@ -1996,7 +1995,7 @@ static void *established_get_first(struct seq_file *seq)
rc = tw;
goto out;
}
- read_unlock_bh(lock);
+ spin_unlock_bh(lock);
st->state = TCP_SEQ_STATE_ESTABLISHED;
}
out:
@@ -2007,8 +2006,8 @@ static void *established_get_next(struct seq_file *seq, void *cur)
{
struct sock *sk = cur;
struct inet_timewait_sock *tw;
- struct hlist_node *node;
- struct tcp_iter_state* st = seq->private;
+ struct hlist_nulls_node *node;
+ struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
++st->num;
@@ -2024,7 +2023,7 @@ get_tw:
cur = tw;
goto out;
}
- read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+ spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
st->state = TCP_SEQ_STATE_ESTABLISHED;
/* Look for next non empty bucket */
@@ -2034,12 +2033,12 @@ get_tw:
if (st->bucket >= tcp_hashinfo.ehash_size)
return NULL;
- read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
- sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
+ spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+ sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
} else
- sk = sk_next(sk);
+ sk = sk_nulls_next(sk);
- sk_for_each_from(sk, node) {
+ sk_nulls_for_each_from(sk, node) {
if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
goto found;
}
@@ -2067,14 +2066,12 @@ static void *established_get_idx(struct seq_file *seq, loff_t pos)
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
void *rc;
- struct tcp_iter_state* st = seq->private;
+ struct tcp_iter_state *st = seq->private;
- inet_listen_lock(&tcp_hashinfo);
st->state = TCP_SEQ_STATE_LISTENING;
rc = listening_get_idx(seq, &pos);
if (!rc) {
- inet_listen_unlock(&tcp_hashinfo);
st->state = TCP_SEQ_STATE_ESTABLISHED;
rc = established_get_idx(seq, pos);
}
@@ -2084,7 +2081,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct tcp_iter_state* st = seq->private;
+ struct tcp_iter_state *st = seq->private;
st->state = TCP_SEQ_STATE_LISTENING;
st->num = 0;
return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
@@ -2093,7 +2090,7 @@ static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
void *rc = NULL;
- struct tcp_iter_state* st;
+ struct tcp_iter_state *st;
if (v == SEQ_START_TOKEN) {
rc = tcp_get_idx(seq, 0);
@@ -2106,7 +2103,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
case TCP_SEQ_STATE_LISTENING:
rc = listening_get_next(seq, v);
if (!rc) {
- inet_listen_unlock(&tcp_hashinfo);
st->state = TCP_SEQ_STATE_ESTABLISHED;
rc = established_get_first(seq);
}
@@ -2123,7 +2119,7 @@ out:
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
- struct tcp_iter_state* st = seq->private;
+ struct tcp_iter_state *st = seq->private;
switch (st->state) {
case TCP_SEQ_STATE_OPENREQ:
@@ -2133,12 +2129,12 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
}
case TCP_SEQ_STATE_LISTENING:
if (v != SEQ_START_TOKEN)
- inet_listen_unlock(&tcp_hashinfo);
+ spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
break;
case TCP_SEQ_STATE_TIME_WAIT:
case TCP_SEQ_STATE_ESTABLISHED:
if (v)
- read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+ spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
break;
}
}
@@ -2284,7 +2280,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
- struct tcp_iter_state* st;
+ struct tcp_iter_state *st;
int len;
if (v == SEQ_START_TOKEN) {
@@ -2350,6 +2346,41 @@ void tcp4_proc_exit(void)
}
#endif /* CONFIG_PROC_FS */
+struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ if (!tcp_v4_check(skb->len, iph->saddr, iph->daddr,
+ skb->csum)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ }
+
+ /* fall through */
+ case CHECKSUM_NONE:
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+ }
+
+ return tcp_gro_receive(head, skb);
+}
+EXPORT_SYMBOL(tcp4_gro_receive);
+
+int tcp4_gro_complete(struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ return tcp_gro_complete(skb);
+}
+EXPORT_SYMBOL(tcp4_gro_complete);
+
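
The new GRO hooks fold checksum validation into the aggregation path: a CHECKSUM_COMPLETE packet is verified against the pseudo-header and downgraded to CHECKSUM_UNNECESSARY, while anything unverifiable sets ->flush so GRO leaves it alone. The arithmetic behind tcp_v4_check() is ordinary ones'-complement addition; below is a rough userspace sketch with host-order toy values and hypothetical helpers:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement accumulator down to 16 bits. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Sum the IPv4 pseudo-header fields, as tcp_v4_check() does before
 * mixing in the checksum accumulated over the TCP header and payload.
 */
static uint32_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint16_t len, uint8_t proto)
{
	return (saddr >> 16) + (saddr & 0xffff) +
	       (daddr >> 16) + (daddr & 0xffff) + proto + len;
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, 40 bytes of TCP (protocol 6) */
	uint32_t s = 0xc0000201, d = 0xc0000202;

	printf("pseudo-header sum: %#x\n",
	       csum_fold(pseudo_hdr_sum(s, d, 40, 6)));
	return 0;
}

A receiver declares the packet good when folding the pseudo-header together with the accumulated packet checksum yields zero; that is exactly the !tcp_v4_check(...) test above.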
struct proto tcp_prot = {
.name = "TCP",
.owner = THIS_MODULE,
@@ -2378,6 +2409,7 @@ struct proto tcp_prot = {
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
.obj_size = sizeof(struct tcp_sock),
+ .slab_flags = SLAB_DESTROY_BY_RCU,
.twsk_prot = &tcp_timewait_sock_ops,
.rsk_prot = &tcp_request_sock_ops,
.h.hashinfo = &tcp_hashinfo,
@@ -2407,6 +2439,7 @@ static struct pernet_operations __net_initdata tcp_sk_ops = {
void __init tcp_v4_init(void)
{
+ inet_hashinfo_init(&tcp_hashinfo);
if (register_pernet_device(&tcp_sk_ops))
panic("Failed to create the TCP control socket.\n");
}
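
The hashinfo changes above also replace the global lhash_lock, its lhash_users count, and the lhash_wait queue with one spinlock per listening bucket, so bind/unbind and the /proc walkers contend only within a bucket. Below is a userspace pthread sketch of the per-bucket pattern; struct names and sizes are stand-ins, not the kernel's:

#include <pthread.h>
#include <stdio.h>

#define LHTABLE_SIZE 32

struct lnode {
	int port;
	struct lnode *next;
};

/* Stand-in for struct inet_listen_hashbucket: the lock lives next to
 * the chain it protects, so a walker takes only its bucket's lock.
 */
struct lbucket {
	pthread_mutex_t lock;
	struct lnode *head;
} table[LHTABLE_SIZE];

static void bucket_walk(unsigned int b, void (*fn)(const struct lnode *))
{
	const struct lnode *n;

	pthread_mutex_lock(&table[b].lock);
	for (n = table[b].head; n; n = n->next)
		fn(n);
	pthread_mutex_unlock(&table[b].lock);
}

static void print_port(const struct lnode *n)
{
	printf("listening on %d\n", n->port);
}

int main(void)
{
	struct lnode a = { 80, NULL };
	unsigned int i;

	for (i = 0; i < LHTABLE_SIZE; i++)
		pthread_mutex_init(&table[i].lock, NULL);
	table[80 % LHTABLE_SIZE].head = &a;
	bucket_walk(80 % LHTABLE_SIZE, print_port);
	return 0;
}

This is also why tcp_seq_stop() above unlocks tcp_hashinfo.listening_hash[st->bucket].lock directly: the iterator can only ever hold the lock of the bucket recorded in st->bucket.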
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 779f2e9d068..f67effbb102 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -491,7 +491,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
* as a request_sock.
*/
-struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
+struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct request_sock **prev)
{
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fe3b4bdfd25..557fe16cbfb 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -42,7 +42,7 @@
/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;
-/* People can turn this on to work with those rare, broken TCPs that
+/* People can turn this on to work with those rare, broken TCPs that
* interpret the window field as a signed quantity.
*/
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
@@ -484,7 +484,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
}
if (likely(sysctl_tcp_window_scaling)) {
opts->ws = tp->rx_opt.rcv_wscale;
- if(likely(opts->ws))
+ if (likely(opts->ws))
size += TCPOLEN_WSCALE_ALIGNED;
}
if (likely(sysctl_tcp_sack)) {
@@ -526,7 +526,7 @@ static unsigned tcp_synack_options(struct sock *sk,
if (likely(ireq->wscale_ok)) {
opts->ws = ireq->rcv_wscale;
- if(likely(opts->ws))
+ if (likely(opts->ws))
size += TCPOLEN_WSCALE_ALIGNED;
}
if (likely(doing_ts)) {
@@ -663,10 +663,14 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
th->urg_ptr = 0;
 /* The urg_mode check is necessary during a window probe below snd_una */
- if (unlikely(tcp_urg_mode(tp) &&
- between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) {
- th->urg_ptr = htons(tp->snd_up - tcb->seq);
- th->urg = 1;
+ if (unlikely(tcp_urg_mode(tp))) {
+ if (between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF)) {
+ th->urg_ptr = htons(tp->snd_up - tcb->seq);
+ th->urg = 1;
+ } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
+ th->urg_ptr = 0xFFFF;
+ th->urg = 1;
+ }
}
tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
@@ -1168,7 +1172,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
- return after(tp->snd_sml,tp->snd_una) &&
+ return after(tp->snd_sml, tp->snd_una) &&
!after(tp->snd_sml, tp->snd_nxt);
}
@@ -1334,7 +1338,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
/* Defer for less than two clock ticks. */
if (tp->tso_deferred &&
- ((jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
+ (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
goto send_now;
in_flight = tcp_packets_in_flight(tp);
@@ -1519,7 +1523,8 @@ static int tcp_mtu_probe(struct sock *sk)
 * Returns 1 if no segments are in flight and we have queued segments, but
* cannot send anything now because of SWS or another problem.
*/
-static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
+static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ int push_one, gfp_t gfp)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
@@ -1527,20 +1532,16 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
int cwnd_quota;
int result;
- /* If we are closed, the bytes will have to remain here.
- * In time closedown will finish, we empty the write queue and all
- * will be happy.
- */
- if (unlikely(sk->sk_state == TCP_CLOSE))
- return 0;
-
sent_pkts = 0;
- /* Do MTU probing. */
- if ((result = tcp_mtu_probe(sk)) == 0) {
- return 0;
- } else if (result > 0) {
- sent_pkts = 1;
+ if (!push_one) {
+ /* Do MTU probing. */
+ result = tcp_mtu_probe(sk);
+ if (!result) {
+ return 0;
+ } else if (result > 0) {
+ sent_pkts = 1;
+ }
}
while ((skb = tcp_send_head(sk))) {
@@ -1562,7 +1563,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
nonagle : TCP_NAGLE_PUSH))))
break;
} else {
- if (tcp_tso_should_defer(sk, skb))
+ if (!push_one && tcp_tso_should_defer(sk, skb))
break;
}
@@ -1577,7 +1578,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
TCP_SKB_CB(skb)->when = tcp_time_stamp;
- if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
+ if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
/* Advance the send_head. This one is sent out.
@@ -1587,6 +1588,9 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
tcp_minshall_update(tp, mss_now, skb);
sent_pkts++;
+
+ if (push_one)
+ break;
}
if (likely(sent_pkts)) {
@@ -1605,10 +1609,18 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
{
struct sk_buff *skb = tcp_send_head(sk);
- if (skb) {
- if (tcp_write_xmit(sk, cur_mss, nonagle))
- tcp_check_probe_timer(sk);
- }
+ if (!skb)
+ return;
+
+ /* If we are closed, the bytes will have to remain here.
+ * In time closedown will finish, we empty the write queue and
+ * all will be happy.
+ */
+ if (unlikely(sk->sk_state == TCP_CLOSE))
+ return;
+
+ if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
+ tcp_check_probe_timer(sk);
}
/* Send _single_ skb sitting at the send head. This function requires
@@ -1616,38 +1628,11 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
*/
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
- struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
- unsigned int tso_segs, cwnd_quota;
BUG_ON(!skb || skb->len < mss_now);
- tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
- cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
-
- if (likely(cwnd_quota)) {
- unsigned int limit;
-
- BUG_ON(!tso_segs);
-
- limit = mss_now;
- if (tso_segs > 1 && !tcp_urg_mode(tp))
- limit = tcp_mss_split_point(sk, skb, mss_now,
- cwnd_quota);
-
- if (skb->len > limit &&
- unlikely(tso_fragment(sk, skb, limit, mss_now)))
- return;
-
- /* Send it out now. */
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
-
- if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
- tcp_event_new_data_sent(sk, skb);
- tcp_cwnd_validate(sk);
- return;
- }
- }
+ tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
}
/* This function returns the amount that we can raise the
@@ -1767,46 +1752,22 @@ u32 __tcp_select_window(struct sock *sk)
return window;
}
-/* Attempt to collapse two adjacent SKB's during retransmission. */
-static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb,
- int mss_now)
+/* Collapses two adjacent SKB's during retransmission. */
+static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
int skb_size, next_skb_size;
u16 flags;
- /* The first test we must make is that neither of these two
- * SKB's are still referenced by someone else.
- */
- if (skb_cloned(skb) || skb_cloned(next_skb))
- return;
-
skb_size = skb->len;
next_skb_size = next_skb->len;
flags = TCP_SKB_CB(skb)->flags;
- /* Also punt if next skb has been SACK'd. */
- if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
- return;
-
- /* Next skb is out of window. */
- if (after(TCP_SKB_CB(next_skb)->end_seq, tcp_wnd_end(tp)))
- return;
-
- /* Punt if not enough space exists in the first SKB for
- * the data in the second, or the total combined payload
- * would exceed the MSS.
- */
- if ((next_skb_size > skb_tailroom(skb)) ||
- ((skb_size + next_skb_size) > mss_now))
- return;
-
BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
tcp_highest_sack_combine(sk, next_skb, skb);
- /* Ok. We will be able to collapse the packet. */
tcp_unlink_write_queue(next_skb, sk);
skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
@@ -1848,54 +1809,60 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb,
sk_wmem_free_skb(sk, next_skb);
}
-/* Do a simple retransmit without using the backoff mechanisms in
- * tcp_timer. This is used for path mtu discovery.
- * The socket is already locked here.
- */
-void tcp_simple_retransmit(struct sock *sk)
+static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
+{
+ if (tcp_skb_pcount(skb) > 1)
+ return 0;
+ /* TODO: SACK collapsing could be used to remove this condition */
+ if (skb_shinfo(skb)->nr_frags != 0)
+ return 0;
+ if (skb_cloned(skb))
+ return 0;
+ if (skb == tcp_send_head(sk))
+ return 0;
+	/* Some heuristics for collapsing over SACK'd data could be invented */
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
+ return 0;
+
+ return 1;
+}
+
+static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
+ int space)
{
- const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb;
- unsigned int mss = tcp_current_mss(sk, 0);
- u32 prior_lost = tp->lost_out;
+ struct sk_buff *skb = to, *tmp;
+ int first = 1;
- tcp_for_write_queue(skb, sk) {
- if (skb == tcp_send_head(sk))
+ if (!sysctl_tcp_retrans_collapse)
+ return;
+ if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)
+ return;
+
+ tcp_for_write_queue_from_safe(skb, tmp, sk) {
+ if (!tcp_can_collapse(sk, skb))
break;
- if (skb->len > mss &&
- !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
- if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
- TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
- tp->retrans_out -= tcp_skb_pcount(skb);
- }
- tcp_skb_mark_lost_uncond_verify(tp, skb);
- }
- }
- tcp_clear_retrans_hints_partial(tp);
+ space -= skb->len;
- if (prior_lost == tp->lost_out)
- return;
+ if (first) {
+ first = 0;
+ continue;
+ }
- if (tcp_is_reno(tp))
- tcp_limit_reno_sacked(tp);
+ if (space < 0)
+ break;
+ /* Punt if not enough space exists in the first SKB for
+ * the data in the second
+ */
+ if (skb->len > skb_tailroom(to))
+ break;
- tcp_verify_left_out(tp);
+ if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
+ break;
- /* Don't muck with the congestion window here.
- * Reason is that we do not increase amount of _data_
- * in network, but units changed and effective
- * cwnd/ssthresh really reduced now.
- */
- if (icsk->icsk_ca_state != TCP_CA_Loss) {
- tp->high_seq = tp->snd_nxt;
- tp->snd_ssthresh = tcp_current_ssthresh(sk);
- tp->prior_ssthresh = 0;
- tp->undo_marker = 0;
- tcp_set_ca_state(sk, TCP_CA_Loss);
+ tcp_collapse_retrans(sk, to);
}
- tcp_xmit_retransmit_queue(sk);
}
/* This retransmits one SKB. Policy decisions and retransmit queue
@@ -1947,17 +1914,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
return -ENOMEM; /* We'll try again later. */
}
- /* Collapse two adjacent packets if worthwhile and we can. */
- if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
- (skb->len < (cur_mss >> 1)) &&
- (!tcp_skb_is_last(sk, skb)) &&
- (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
- (skb_shinfo(skb)->nr_frags == 0 &&
- skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
- (tcp_skb_pcount(skb) == 1 &&
- tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
- (sysctl_tcp_retrans_collapse != 0))
- tcp_retrans_try_collapse(sk, skb, cur_mss);
+ tcp_retrans_try_collapse(sk, skb, cur_mss);
/* Some Solaris stacks overoptimize and ignore the FIN on a
* retransmit when old data is attached. So strip it off
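
The urgent-pointer rework in tcp_transmit_skb() above no longer drops URG when snd_up lies more than 2^16 bytes beyond this segment's sequence; it advertises the maximum offset instead. A self-contained sketch of the decision follows; the sequence compares mirror the kernel's after()/between(), while the helper itself is hypothetical:

#include <stdint.h>
#include <stdio.h>

static int after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/* lo <= s <= hi, modulo 2^32 */
static int between(uint32_t s, uint32_t lo, uint32_t hi)
{
	return hi - lo >= s - lo;
}

static void set_urg(uint32_t seq, uint32_t snd_up, uint32_t snd_nxt,
		    uint16_t *urg_ptr, int *urg)
{
	*urg_ptr = 0;
	*urg = 0;
	if (between(snd_up, seq + 1, seq + 0xFFFF)) {
		*urg_ptr = (uint16_t)(snd_up - seq);
		*urg = 1;
	} else if (after(seq + 0xFFFF, snd_nxt)) {
		/* urgent point out of this segment's 16-bit reach:
		 * saturate the pointer rather than losing URG entirely
		 */
		*urg_ptr = 0xFFFF;
		*urg = 1;
	}
}

int main(void)
{
	uint16_t ptr;
	int urg;

	set_urg(1000, 1500, 2000, &ptr, &urg);		/* reachable */
	printf("urg=%d ptr=%u\n", urg, ptr);
	set_urg(1000, 1000 + 70000, 2000, &ptr, &urg);	/* beyond 2^16 */
	printf("urg=%d ptr=%u\n", urg, ptr);
	return 0;
}

The second call prints urg=1 ptr=65535: the urgent point cannot be expressed, but the flag and a saturated pointer still go out as long as the 16-bit range extends past snd_nxt.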
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 7ddc30f0744..25524d4e372 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -153,12 +153,11 @@ static int tcpprobe_sprint(char *tbuf, int n)
= ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
return snprintf(tbuf, n,
- "%lu.%09lu " NIPQUAD_FMT ":%u " NIPQUAD_FMT ":%u"
- " %d %#x %#x %u %u %u %u\n",
+ "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
(unsigned long) tv.tv_sec,
(unsigned long) tv.tv_nsec,
- NIPQUAD(p->saddr), ntohs(p->sport),
- NIPQUAD(p->daddr), ntohs(p->dport),
+ &p->saddr, ntohs(p->sport),
+ &p->daddr, ntohs(p->dport),
p->length, p->snd_nxt, p->snd_una,
p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt);
}
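
These NIPQUAD/NIP6 conversions lean on the %pI4 and %pI6 printk extensions, which take a pointer to the address instead of expanded byte arguments. Userspace printf has no such specifier; what %pI4 amounts to is roughly this (hypothetical helper):

#include <stdint.h>
#include <stdio.h>

/* Print a network-order IPv4 address from a pointer, the way the
 * kernel's vsnprintf handles "%pI4".
 */
static void print_ipv4(const void *addr)
{
	const uint8_t *p = addr;

	printf("%u.%u.%u.%u", p[0], p[1], p[2], p[3]);
}

int main(void)
{
	const uint8_t saddr[4] = { 192, 0, 2, 1 };

	print_ipv4(saddr);
	putchar('\n');
	return 0;
}

Because the argument points at a network-order __be32, the bytes already sit in wire order and no ntohl() is needed, which is why the ports above still go through ntohs() while the addresses do not.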
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6b6dff1164b..0170e914f1b 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -65,7 +65,7 @@ static void tcp_write_err(struct sock *sk)
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
struct tcp_sock *tp = tcp_sk(sk);
- int orphans = atomic_read(&tcp_orphan_count);
+ int orphans = percpu_counter_read_positive(&tcp_orphan_count);
/* If peer does not open window for long time, or did not transmit
* anything for long time, penalize it. */
@@ -171,7 +171,7 @@ static int tcp_write_timeout(struct sock *sk)
static void tcp_delack_timer(unsigned long data)
{
- struct sock *sk = (struct sock*)data;
+ struct sock *sk = (struct sock *)data;
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -299,15 +299,15 @@ static void tcp_retransmit_timer(struct sock *sk)
#ifdef TCP_DEBUG
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_family == AF_INET) {
- LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer " NIPQUAD_FMT ":%u/%u shrinks window %u:%u. Repaired.\n",
- NIPQUAD(inet->daddr), ntohs(inet->dport),
+ LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
+ &inet->daddr, ntohs(inet->dport),
inet->num, tp->snd_una, tp->snd_nxt);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
- LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer " NIP6_FMT ":%u/%u shrinks window %u:%u. Repaired.\n",
- NIP6(np->daddr), ntohs(inet->dport),
+ LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
+ &np->daddr, ntohs(inet->dport),
inet->num, tp->snd_una, tp->snd_nxt);
}
#endif
@@ -396,7 +396,7 @@ out:;
static void tcp_write_timer(unsigned long data)
{
- struct sock *sk = (struct sock*)data;
+ struct sock *sk = (struct sock *)data;
struct inet_connection_sock *icsk = inet_csk(sk);
int event;
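
tcp_orphan_count here (like tcp_sockets_allocated in tcp_ipv4.c above) moves from a shared atomic_t to a percpu_counter: writers touch only a per-CPU slot, and a reader sums the slots, tolerating a transiently negative total; hence percpu_counter_read_positive(), which clamps at zero. Below is a toy model of that read side, with no batching and hypothetical names:

#include <stdio.h>

#define NR_CPUS 4

/* Toy percpu counter: the true value is the sum of the slots, and any
 * single slot may go negative when inc and dec land on different CPUs.
 */
struct pcounter {
	long cpu[NR_CPUS];
};

static long pcounter_read_positive(const struct pcounter *c)
{
	long sum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++)
		sum += c->cpu[i];
	return sum > 0 ? sum : 0;	/* clamp transiently negative sums */
}

int main(void)
{
	/* sockets orphaned on CPU 0, some already reaped on CPU 2 */
	struct pcounter orphans = { { 3, 0, -2, 0 } };

	printf("orphans: %ld\n", pcounter_read_positive(&orphans));
	return 0;
}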
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index e03b10183a8..9ec843a9bbb 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -83,7 +83,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
else if (!yeah->doing_reno_now) {
/* Scalable */
- tp->snd_cwnd_cnt+=yeah->pkts_acked;
+ tp->snd_cwnd_cnt += yeah->pkts_acked;
if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
if (tp->snd_cwnd < tp->snd_cwnd_clamp)
tp->snd_cwnd++;
@@ -224,7 +224,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) {
reduction = max( reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
} else
- reduction = max(tp->snd_cwnd>>1,2U);
+ reduction = max(tp->snd_cwnd>>1, 2U);
yeah->fast_count = 0;
yeah->reno_count = max(yeah->reno_count>>1, 2U);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 98c1fd09be8..cf5ab0581eb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -81,6 +81,8 @@
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
@@ -104,12 +106,8 @@
#include <net/xfrm.h>
#include "udp_impl.h"
-/*
- * Snmp MIB for the UDP layer
- */
-
-struct hlist_head udp_hash[UDP_HTABLE_SIZE];
-DEFINE_RWLOCK(udp_hash_lock);
+struct udp_table udp_table;
+EXPORT_SYMBOL(udp_table);
int sysctl_udp_mem[3] __read_mostly;
int sysctl_udp_rmem_min __read_mostly;
@@ -123,15 +121,15 @@ atomic_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
static int udp_lib_lport_inuse(struct net *net, __u16 num,
- const struct hlist_head udptable[],
+ const struct udp_hslot *hslot,
struct sock *sk,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2))
{
struct sock *sk2;
- struct hlist_node *node;
+ struct hlist_nulls_node *node;
- sk_for_each(sk2, node, &udptable[udp_hashfn(net, num)])
+ sk_nulls_for_each(sk2, node, &hslot->head)
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
sk2->sk_hash == num &&
@@ -154,12 +152,11 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2 ) )
{
- struct hlist_head *udptable = sk->sk_prot->h.udp_hash;
+ struct udp_hslot *hslot;
+ struct udp_table *udptable = sk->sk_prot->h.udp_table;
int error = 1;
struct net *net = sock_net(sk);
- write_lock_bh(&udp_hash_lock);
-
if (!snum) {
int low, high, remaining;
unsigned rand;
@@ -171,26 +168,34 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
rand = net_random();
snum = first = rand % remaining + low;
rand |= 1;
- while (udp_lib_lport_inuse(net, snum, udptable, sk,
- saddr_comp)) {
+ for (;;) {
+ hslot = &udptable->hash[udp_hashfn(net, snum)];
+ spin_lock_bh(&hslot->lock);
+ if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
+ break;
+ spin_unlock_bh(&hslot->lock);
do {
snum = snum + rand;
} while (snum < low || snum > high);
if (snum == first)
goto fail;
}
- } else if (udp_lib_lport_inuse(net, snum, udptable, sk, saddr_comp))
- goto fail;
-
+ } else {
+ hslot = &udptable->hash[udp_hashfn(net, snum)];
+ spin_lock_bh(&hslot->lock);
+ if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
+ goto fail_unlock;
+ }
inet_sk(sk)->num = snum;
sk->sk_hash = snum;
if (sk_unhashed(sk)) {
- sk_add_node(sk, &udptable[udp_hashfn(net, snum)]);
+ sk_nulls_add_node_rcu(sk, &hslot->head);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}
error = 0;
+fail_unlock:
+ spin_unlock_bh(&hslot->lock);
fail:
- write_unlock_bh(&udp_hash_lock);
return error;
}
@@ -208,63 +213,91 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal);
}
+static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
+ unsigned short hnum,
+ __be16 sport, __be32 daddr, __be16 dport, int dif)
+{
+ int score = -1;
+
+ if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
+ !ipv6_only_sock(sk)) {
+ struct inet_sock *inet = inet_sk(sk);
+
+ score = (sk->sk_family == PF_INET ? 1 : 0);
+ if (inet->rcv_saddr) {
+ if (inet->rcv_saddr != daddr)
+ return -1;
+ score += 2;
+ }
+ if (inet->daddr) {
+ if (inet->daddr != saddr)
+ return -1;
+ score += 2;
+ }
+ if (inet->dport) {
+ if (inet->dport != sport)
+ return -1;
+ score += 2;
+ }
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
+ return -1;
+ score += 2;
+ }
+ }
+ return score;
+}
+
 /* UDP nearly always wildcards out the wazoo, so it makes no sense to try
* harder than this. -DaveM
*/
static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
__be16 sport, __be32 daddr, __be16 dport,
- int dif, struct hlist_head udptable[])
+ int dif, struct udp_table *udptable)
{
- struct sock *sk, *result = NULL;
- struct hlist_node *node;
+ struct sock *sk, *result;
+ struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
- int badness = -1;
-
- read_lock(&udp_hash_lock);
- sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) {
- struct inet_sock *inet = inet_sk(sk);
-
- if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
- !ipv6_only_sock(sk)) {
- int score = (sk->sk_family == PF_INET ? 1 : 0);
- if (inet->rcv_saddr) {
- if (inet->rcv_saddr != daddr)
- continue;
- score+=2;
- }
- if (inet->daddr) {
- if (inet->daddr != saddr)
- continue;
- score+=2;
- }
- if (inet->dport) {
- if (inet->dport != sport)
- continue;
- score+=2;
- }
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- continue;
- score+=2;
- }
- if (score == 9) {
- result = sk;
- break;
- } else if (score > badness) {
- result = sk;
- badness = score;
- }
+ unsigned int hash = udp_hashfn(net, hnum);
+ struct udp_hslot *hslot = &udptable->hash[hash];
+ int score, badness;
+
+ rcu_read_lock();
+begin:
+ result = NULL;
+ badness = -1;
+ sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+ score = compute_score(sk, net, saddr, hnum, sport,
+ daddr, dport, dif);
+ if (score > badness) {
+ result = sk;
+ badness = score;
}
}
- if (result)
- sock_hold(result);
- read_unlock(&udp_hash_lock);
+	/*
+	 * If the nulls value we got at the end of this lookup is
+	 * not the expected one, we must restart the lookup.  We
+	 * probably raced with an item that was moved to another chain.
+	 */
+ if (get_nulls_value(node) != hash)
+ goto begin;
+
+ if (result) {
+ if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+ result = NULL;
+ else if (unlikely(compute_score(result, net, saddr, hnum, sport,
+ daddr, dport, dif) < badness)) {
+ sock_put(result);
+ goto begin;
+ }
+ }
+ rcu_read_unlock();
return result;
}
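
The lookup above is the heart of the conversion: it runs under rcu_read_lock() with no bucket lock at all. That is only safe for two reasons. First, the chains are "nulls" lists, terminated not by NULL but by a marker encoding the bucket number, so a reader migrated to another chain mid-walk (because the node it stood on was deleted and rehashed) detects it when get_nulls_value(node) differs from the expected hash and restarts. Second, with SLAB_DESTROY_BY_RCU a socket's memory can be freed and reused for a new socket without an intervening grace period, so the winner must be pinned with atomic_inc_not_zero (its refcount may already be zero if it is dying) and compute_score re-checked after the reference is taken. A userspace model of the nulls-marker detection; the tagged-pointer encoding here is my own, though the kernel's hlist_nulls uses the same low-bit trick:

#include <stdint.h>
#include <stdbool.h>

struct nnode {
	uintptr_t next;		/* a node pointer, or (bucket << 1) | 1 */
};

static bool is_nulls(uintptr_t v)
{
	return v & 1;
}

static unsigned long nulls_value(uintptr_t v)
{
	return v >> 1;
}

/*
 * Lockless walk of the chain anchored at 'head' for bucket 'hash'.
 * Returns false when the terminating marker names a different bucket:
 * the walk strayed onto another chain via a concurrent rehash and
 * must be restarted, mirroring the 'goto begin' above.
 */
static bool walk_bucket(uintptr_t head, unsigned long hash)
{
	uintptr_t p;

	for (p = head; !is_nulls(p); p = ((struct nnode *)p)->next) {
		/* examine the node, remember the best-scoring one */
	}
	return nulls_value(p) == hash;
}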
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
- struct hlist_head udptable[])
+ struct udp_table *udptable)
{
struct sock *sk;
const struct iphdr *iph = ip_hdr(skb);
@@ -280,7 +313,7 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
- return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, udp_hash);
+ return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
@@ -289,11 +322,11 @@ static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
__be16 rmt_port, __be32 rmt_addr,
int dif)
{
- struct hlist_node *node;
+ struct hlist_nulls_node *node;
struct sock *s = sk;
unsigned short hnum = ntohs(loc_port);
- sk_for_each_from(s, node) {
+ sk_nulls_for_each_from(s, node) {
struct inet_sock *inet = inet_sk(s);
if (!net_eq(sock_net(s), net) ||
@@ -324,7 +357,7 @@ found:
* to find the appropriate port.
*/
-void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
+void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
struct inet_sock *inet;
struct iphdr *iph = (struct iphdr*)skb->data;
@@ -393,7 +426,7 @@ out:
void udp_err(struct sk_buff *skb, u32 info)
{
- __udp4_lib_err(skb, info, udp_hash);
+ __udp4_lib_err(skb, info, &udp_table);
}
/*
@@ -686,7 +719,7 @@ do_append_data:
up->len += ulen;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
- sizeof(struct udphdr), &ipc, rt,
+ sizeof(struct udphdr), &ipc, &rt,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_flush_pending_frames(sk);
@@ -935,6 +968,23 @@ int udp_disconnect(struct sock *sk, int flags)
return 0;
}
+void udp_lib_unhash(struct sock *sk)
+{
+ if (sk_hashed(sk)) {
+ struct udp_table *udptable = sk->sk_prot->h.udp_table;
+ unsigned int hash = udp_hashfn(sock_net(sk), sk->sk_hash);
+ struct udp_hslot *hslot = &udptable->hash[hash];
+
+ spin_lock_bh(&hslot->lock);
+ if (sk_nulls_del_node_init_rcu(sk)) {
+ inet_sk(sk)->num = 0;
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+ }
+ spin_unlock_bh(&hslot->lock);
+ }
+}
+EXPORT_SYMBOL(udp_lib_unhash);
+
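
udp_lib_unhash is the writer side of the same discipline: removal touches only the victim's own slot, under that slot's spinlock, while concurrent lockless lookups are covered by the nulls restart described above. Reusing the my_table types from the bind sketch earlier, deletion looks roughly like this (my_unhash is illustrative, not a kernel helper):

static bool my_unhash(struct my_table *t, struct my_node *victim)
{
	struct my_hslot *hslot = &t->hash[my_hashfn(victim->port)];
	struct my_node **pp;
	bool found = false;

	pthread_mutex_lock(&hslot->lock);	/* this slot only */
	for (pp = &hslot->head; *pp; pp = &(*pp)->next) {
		if (*pp == victim) {
			*pp = victim->next;	/* unlink */
			found = true;
			break;
		}
	}
	pthread_mutex_unlock(&hslot->lock);
	return found;
}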
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int is_udplite = IS_UDPLITE(sk);
@@ -1073,13 +1123,14 @@ drop:
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct udphdr *uh,
__be32 saddr, __be32 daddr,
- struct hlist_head udptable[])
+ struct udp_table *udptable)
{
struct sock *sk;
+ struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))];
int dif;
- read_lock(&udp_hash_lock);
- sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]);
+ spin_lock(&hslot->lock);
+ sk = sk_nulls_head(&hslot->head);
dif = skb->dev->ifindex;
sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
if (sk) {
@@ -1088,7 +1139,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
do {
struct sk_buff *skb1 = skb;
- sknext = udp_v4_mcast_next(net, sk_next(sk), uh->dest,
+ sknext = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
daddr, uh->source, saddr,
dif);
if (sknext)
@@ -1105,7 +1156,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
} while (sknext);
} else
kfree_skb(skb);
- read_unlock(&udp_hash_lock);
+ spin_unlock(&hslot->lock);
return 0;
}
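
Multicast delivery still takes a lock, but now only the one slot's spinlock rather than the table-wide rwlock. Note the look-ahead shape of the loop above: the next matching socket is located first, and the skb is cloned only when another recipient actually exists, so the last recipient consumes the original buffer without an extra copy. Schematically (find_next_match, clone_pkt and deliver are placeholders of mine, not kernel APIs):

struct pkt;
struct rcpt;

extern struct rcpt *find_next_match(struct rcpt *after);	/* stub */
extern struct pkt *clone_pkt(struct pkt *p);			/* stub */
extern void deliver(struct rcpt *r, struct pkt *p);		/* stub */

static void fanout(struct rcpt *first, struct pkt *p)
{
	struct rcpt *sk = first, *next;

	while (sk) {
		next = find_next_match(sk);
		/* clone only if someone else still needs the buffer */
		deliver(sk, next ? clone_pkt(p) : p);
		sk = next;
	}
}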
@@ -1151,7 +1202,7 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
* All we need to do is get the socket, and then do a checksum.
*/
-int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
+int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct sock *sk;
@@ -1219,13 +1270,13 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
return 0;
short_packet:
- LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From " NIPQUAD_FMT ":%u %d/%d to " NIPQUAD_FMT ":%u\n",
+ LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
- NIPQUAD(saddr),
+ &saddr,
ntohs(uh->source),
ulen,
skb->len,
- NIPQUAD(daddr),
+ &daddr,
ntohs(uh->dest));
goto drop;
@@ -1234,11 +1285,11 @@ csum_error:
* RFC1122: OK. Discards the bad packet silently (as far as
* the network is concerned, anyway) as per 4.1.3.4 (MUST).
*/
- LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From " NIPQUAD_FMT ":%u to " NIPQUAD_FMT ":%u ulen %d\n",
+ LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
- NIPQUAD(saddr),
+ &saddr,
ntohs(uh->source),
- NIPQUAD(daddr),
+ &daddr,
ntohs(uh->dest),
ulen);
drop:
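
The logging changes in these two hunks are part of the tree-wide move from the NIPQUAD macro family to the %pI4 printk extension: instead of expanding the address into four separate byte arguments, the format string now takes a single pointer to the big-endian 32-bit address. Paraphrasing the lines above as a standalone illustration:

/* before: NIPQUAD() expanded saddr into four u8 arguments */
printk(KERN_DEBUG "from " NIPQUAD_FMT ":%u\n", NIPQUAD(saddr), ntohs(uh->source));

/* after: one pointer per address */
printk(KERN_DEBUG "from %pI4:%u\n", &saddr, ntohs(uh->source));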
@@ -1249,7 +1300,7 @@ drop:
int udp_rcv(struct sk_buff *skb)
{
- return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
+ return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
void udp_destroy_sock(struct sock *sk)
@@ -1491,7 +1542,8 @@ struct proto udp_prot = {
.sysctl_wmem = &sysctl_udp_wmem_min,
.sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp_sock),
- .h.udp_hash = udp_hash,
+ .slab_flags = SLAB_DESTROY_BY_RCU,
+ .h.udp_table = &udp_table,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udp_setsockopt,
.compat_getsockopt = compat_udp_getsockopt,
@@ -1501,20 +1553,23 @@ struct proto udp_prot = {
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
-static struct sock *udp_get_first(struct seq_file *seq)
+static struct sock *udp_get_first(struct seq_file *seq, int start)
{
struct sock *sk;
struct udp_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
- for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
- struct hlist_node *node;
- sk_for_each(sk, node, state->hashtable + state->bucket) {
+ for (state->bucket = start; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
+ struct hlist_nulls_node *node;
+ struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
+ spin_lock_bh(&hslot->lock);
+ sk_nulls_for_each(sk, node, &hslot->head) {
if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == state->family)
goto found;
}
+ spin_unlock_bh(&hslot->lock);
}
sk = NULL;
found:
@@ -1527,21 +1582,19 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
struct net *net = seq_file_net(seq);
do {
- sk = sk_next(sk);
-try_again:
- ;
+ sk = sk_nulls_next(sk);
} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
- if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
- sk = sk_head(state->hashtable + state->bucket);
- goto try_again;
+ if (!sk) {
+ spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
+ return udp_get_first(seq, state->bucket + 1);
}
return sk;
}
static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
- struct sock *sk = udp_get_first(seq);
+ struct sock *sk = udp_get_first(seq, 0);
if (sk)
while (pos && (sk = udp_get_next(seq, sk)) != NULL)
@@ -1550,9 +1603,7 @@ static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
}
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(udp_hash_lock)
{
- read_lock(&udp_hash_lock);
return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
@@ -1570,9 +1621,11 @@ static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void udp_seq_stop(struct seq_file *seq, void *v)
- __releases(udp_hash_lock)
{
- read_unlock(&udp_hash_lock);
+ struct udp_iter_state *state = seq->private;
+
+ if (state->bucket < UDP_HTABLE_SIZE)
+ spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}
static int udp_seq_open(struct inode *inode, struct file *file)
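
The /proc iterator can no longer pin the whole table for the duration of a read, so it holds at most one bucket spinlock at a time: udp_get_first returns with the current bucket locked, udp_get_next unlocks it and locks the next non-empty one, and udp_seq_stop releases whatever is still held (the state->bucket < UDP_HTABLE_SIZE test distinguishes "stopped mid-table" from "ran off the end with nothing locked"). The handoff pattern, in the userspace model used earlier (the iter names are mine):

struct my_iter {
	int bucket;
};

static struct my_node *iter_first(struct my_table *t, struct my_iter *it,
				  int start)
{
	for (it->bucket = start; it->bucket < HTABLE_SIZE; it->bucket++) {
		pthread_mutex_lock(&t->hash[it->bucket].lock);
		if (t->hash[it->bucket].head)
			return t->hash[it->bucket].head; /* lock stays held */
		pthread_mutex_unlock(&t->hash[it->bucket].lock);
	}
	return NULL;			/* off the end: nothing left locked */
}

static struct my_node *iter_next(struct my_table *t, struct my_iter *it,
				 struct my_node *n)
{
	if (n->next)
		return n->next;		/* same bucket, same lock */
	pthread_mutex_unlock(&t->hash[it->bucket].lock);
	return iter_first(t, it, it->bucket + 1);	/* hand off */
}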
@@ -1588,7 +1641,7 @@ static int udp_seq_open(struct inode *inode, struct file *file)
s = ((struct seq_file *)file->private_data)->private;
s->family = afinfo->family;
- s->hashtable = afinfo->hashtable;
+ s->udp_table = afinfo->udp_table;
return err;
}
@@ -1660,7 +1713,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
static struct udp_seq_afinfo udp4_seq_afinfo = {
.name = "udp",
.family = AF_INET,
- .hashtable = udp_hash,
+ .udp_table = &udp_table,
.seq_fops = {
.owner = THIS_MODULE,
},
@@ -1695,16 +1748,28 @@ void udp4_proc_exit(void)
}
#endif /* CONFIG_PROC_FS */
+void __init udp_table_init(struct udp_table *table)
+{
+ int i;
+
+ for (i = 0; i < UDP_HTABLE_SIZE; i++) {
+ INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
+ spin_lock_init(&table->hash[i].lock);
+ }
+}
+
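
udp_table_init seeds each empty chain with its own bucket index as the nulls value, which is exactly the value the lockless lookup later compares against with get_nulls_value(). In the tagged-pointer model from the lookup sketch, the equivalent initialization is simply:

uintptr_t heads[HTABLE_SIZE];
unsigned long i;

/* an empty chain terminates in its own bucket's marker from the start */
for (i = 0; i < HTABLE_SIZE; i++)
	heads[i] = (i << 1) | 1;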
void __init udp_init(void)
{
- unsigned long limit;
+ unsigned long nr_pages, limit;
+ udp_table_init(&udp_table);
	/* Set the pressure threshold up using the same strategy as TCP. It is
	 * a fraction of global memory that is up to 1/2 at 256 MB, decreasing
	 * toward zero with the amount of memory, with a floor of 128 pages.
	 */
- limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
- limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+ nr_pages = totalram_pages - totalhigh_pages;
+ limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
+ limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
limit = max(limit, 128UL);
sysctl_udp_mem[0] = limit / 4 * 3;
sysctl_udp_mem[1] = limit;
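
The sizing arithmetic is easier to follow with numbers. Assuming 4 KB pages (PAGE_SHIFT = 12) and 256 MB of low memory: nr_pages = 65536, so min(65536, 1UL<<16) >> 8 = 256, and then (256 * (65536 >> 8)) >> 1 = 32768 pages = 128 MB, half of memory, the ceiling the comment describes. A 64 MB machine gets (64 * 64) >> 1 = 2048 pages = 8 MB, one eighth, so the fraction falls away below the 256 MB knee; above it the first factor is capped at 256 and the threshold stays at roughly half of low memory. sysctl_udp_mem[0] is then set to 3/4 of that limit.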
@@ -1715,8 +1780,6 @@ void __init udp_init(void)
}
EXPORT_SYMBOL(udp_disconnect);
-EXPORT_SYMBOL(udp_hash);
-EXPORT_SYMBOL(udp_hash_lock);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 2e9bad2fa1b..9f4a6165f72 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -5,8 +5,8 @@
#include <net/protocol.h>
#include <net/inet_common.h>
-extern int __udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int );
-extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []);
+extern int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int );
+extern void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
extern int udp_v4_get_port(struct sock *sk, unsigned short snum);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 3c807964da9..c784891cb7e 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -12,16 +12,17 @@
*/
#include "udp_impl.h"
-struct hlist_head udplite_hash[UDP_HTABLE_SIZE];
+struct udp_table udplite_table;
+EXPORT_SYMBOL(udplite_table);
static int udplite_rcv(struct sk_buff *skb)
{
- return __udp4_lib_rcv(skb, udplite_hash, IPPROTO_UDPLITE);
+ return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
}
static void udplite_err(struct sk_buff *skb, u32 info)
{
- __udp4_lib_err(skb, info, udplite_hash);
+ __udp4_lib_err(skb, info, &udplite_table);
}
static struct net_protocol udplite_protocol = {
@@ -50,7 +51,8 @@ struct proto udplite_prot = {
.unhash = udp_lib_unhash,
.get_port = udp_v4_get_port,
.obj_size = sizeof(struct udp_sock),
- .h.udp_hash = udplite_hash,
+ .slab_flags = SLAB_DESTROY_BY_RCU,
+ .h.udp_table = &udplite_table,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udp_setsockopt,
.compat_getsockopt = compat_udp_getsockopt,
@@ -71,7 +73,7 @@ static struct inet_protosw udplite4_protosw = {
static struct udp_seq_afinfo udplite4_seq_afinfo = {
.name = "udplite",
.family = AF_INET,
- .hashtable = udplite_hash,
+ .udp_table = &udplite_table,
.seq_fops = {
.owner = THIS_MODULE,
},
@@ -108,6 +110,7 @@ static inline int udplite4_proc_init(void)
void __init udplite4_register(void)
{
+ udp_table_init(&udplite_table);
if (proto_register(&udplite_prot, 1))
goto out_register_err;
@@ -126,5 +129,4 @@ out_register_err:
printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__);
}
-EXPORT_SYMBOL(udplite_hash);
EXPORT_SYMBOL(udplite_prot);
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 390dcb1354a..4ec2162a437 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -78,7 +78,6 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
struct udphdr *uh;
struct iphdr *iph;
int iphlen, len;
- int ret;
__u8 *udpdata;
__be32 *udpdata32;
@@ -152,8 +151,7 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
skb_reset_transport_header(skb);
/* process ESP */
- ret = xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, encap_type);
- return ret;
+ return xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, encap_type);
drop:
kfree_skb(skb);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index c63de0a72ab..2ad24ba31f9 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -18,7 +18,8 @@
static struct dst_ops xfrm4_dst_ops;
static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
-static struct dst_entry *xfrm4_dst_lookup(int tos, xfrm_address_t *saddr,
+static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
+ xfrm_address_t *saddr,
xfrm_address_t *daddr)
{
struct flowi fl = {
@@ -36,19 +37,20 @@ static struct dst_entry *xfrm4_dst_lookup(int tos, xfrm_address_t *saddr,
if (saddr)
fl.fl4_src = saddr->a4;
- err = __ip_route_output_key(&init_net, &rt, &fl);
+ err = __ip_route_output_key(net, &rt, &fl);
dst = &rt->u.dst;
if (err)
dst = ERR_PTR(err);
return dst;
}
-static int xfrm4_get_saddr(xfrm_address_t *saddr, xfrm_address_t *daddr)
+static int xfrm4_get_saddr(struct net *net,
+ xfrm_address_t *saddr, xfrm_address_t *daddr)
{
struct dst_entry *dst;
struct rtable *rt;
- dst = xfrm4_dst_lookup(0, NULL, daddr);
+ dst = xfrm4_dst_lookup(net, 0, NULL, daddr);
if (IS_ERR(dst))
return -EHOSTUNREACH;
@@ -65,7 +67,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
read_lock_bh(&policy->lock);
for (dst = policy->bundles; dst; dst = dst->next) {
- struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
+ struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/
xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
xdst->u.rt.fl.fl4_src == fl->fl4_src &&
@@ -187,7 +189,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
static inline int xfrm4_garbage_collect(struct dst_ops *ops)
{
- xfrm4_policy_afinfo.garbage_collect();
+ xfrm4_policy_afinfo.garbage_collect(&init_net);
return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2);
}
@@ -246,7 +248,6 @@ static struct dst_ops xfrm4_dst_ops = {
.ifdown = xfrm4_dst_ifdown,
.local_out = __ip_local_out,
.gc_thresh = 1024,
- .entry_size = sizeof(struct xfrm_dst),
.entries = ATOMIC_INIT(0),
};
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 55dc6beab9a..1ef1366a0a0 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -13,8 +13,6 @@
#include <linux/ipsec.h>
#include <linux/netfilter_ipv4.h>
-static struct xfrm_state_afinfo xfrm4_state_afinfo;
-
static int xfrm4_init_flags(struct xfrm_state *x)
{
if (ipv4_config.no_pmtu_disc)