author      Hans Westgaard Ry <hans.westgaard.ry@oracle.com>  2016-02-03 09:26:57 +0100
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2017-02-08 09:43:05 +0100
commit      df93393787a3fc3b994c5061a890b0250bae0fbc (patch)
tree        8a971649ccb5573fcb38204db242baa00b374cfe
parent      1115c5f029831fd3a9f447094069bb01802418a1 (diff)
download    linux-linaro-stable-df93393787a3fc3b994c5061a890b0250bae0fbc.tar.gz
net: Add sysctl_max_skb_frags
commit 5f74f82ea34c0da80ea0b49192bb5ea06e063593 upstream.

Devices may have limits on the number of fragments in an skb they support. The current code uses a constant as the maximum number of fragments one skb can hold and use. When scatter/gather is enabled and traffic consists of many small messages, skbs are filled up to that maximum number of fragments and may thereby exceed the limit of certain devices.

This patch introduces a global variable, tunable via sysctl, for the maximum number of fragments.

Signed-off-by: Hans Westgaard Ry <hans.westgaard.ry@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
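The new limit is tunable at runtime through the net.core sysctl tree. As a rough illustration (not part of this commit), the userspace sketch below reads the current value from /proc/sys/net/core/max_skb_frags and, if a new value is given on the command line, writes it back; the procfs path follows from the max_skb_frags entry added to net_core_table in the diff below, and the command-line value is purely hypothetical.

/*
 * Illustrative userspace sketch, not part of the patch: query and
 * optionally lower net.core.max_skb_frags through procfs.
 */
#include <stdio.h>

#define SYSCTL_PATH "/proc/sys/net/core/max_skb_frags"

int main(int argc, char **argv)
{
	FILE *f = fopen(SYSCTL_PATH, "r");
	int cur;

	if (!f || fscanf(f, "%d", &cur) != 1) {
		perror(SYSCTL_PATH);
		return 1;
	}
	fclose(f);
	printf("max_skb_frags = %d\n", cur);

	if (argc > 1) {
		/*
		 * The new value must lie between 1 and MAX_SKB_FRAGS;
		 * the proc_dointvec_minmax handler added by the patch
		 * rejects anything outside that range. Writing here
		 * requires root.
		 */
		f = fopen(SYSCTL_PATH, "w");
		if (!f || fprintf(f, "%s\n", argv[1]) < 0) {
			perror(SYSCTL_PATH);
			return 1;
		}
		fclose(f);
	}
	return 0;
}

Equivalently, the standard sysctl(8) utility can read or set net.core.max_skb_frags once the patch is applied.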
-rw-r--r--  include/linux/skbuff.h       1
-rw-r--r--  net/core/skbuff.c            2
-rw-r--r--  net/core/sysctl_net_core.c  10
-rw-r--r--  net/ipv4/tcp.c               4
4 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3b57c6712495..2ff757f2d3a3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -188,6 +188,7 @@ struct sk_buff;
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
+extern int sysctl_max_skb_frags;
typedef struct skb_frag_struct skb_frag_t;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b2921c0d5608..97549212e9e3 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -77,6 +77,8 @@
struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
+EXPORT_SYMBOL(sysctl_max_skb_frags);
/**
* skb_panic - private function for out-of-line support
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index e731c96eac4b..cd386d2fd039 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -27,6 +27,7 @@ static int one = 1;
static int ushort_max = USHRT_MAX;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
+static int max_skb_frags = MAX_SKB_FRAGS;
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
@@ -363,6 +364,15 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "max_skb_frags",
+ .data = &sysctl_max_skb_frags,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one,
+ .extra2 = &max_skb_frags,
+ },
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 32b25cc96fff..a21c47289765 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -939,7 +939,7 @@ new_segment:
i = skb_shinfo(skb)->nr_frags;
can_coalesce = skb_can_coalesce(skb, i, page, offset);
- if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+ if (!can_coalesce && i >= sysctl_max_skb_frags) {
tcp_mark_push(tp, skb);
goto new_segment;
}
@@ -1225,7 +1225,7 @@ new_segment:
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
- if (i == MAX_SKB_FRAGS || !sg) {
+ if (i == sysctl_max_skb_frags || !sg) {
tcp_mark_push(tp, skb);
goto new_segment;
}