/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to "restart window".
 * This is the first part of the cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

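	/* Halve cwnd once for each full RTO that elapsed while the
	 * connection was idle, but never drop below the restart window.
	 */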
	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to a value enough for senders starting with
	 * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
	 * a limit on the initial window when mss is larger than 1460.
	 */
	if (mss > (1 << *rcv_wscale)) {
		int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
		if (mss > 1460)
			init_cwnd =
			max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
		/* when initializing use the value from init_rcv_wnd
		 * rather than the default from above
		 */
		if (init_rcv_wnd)
			*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
		else
			*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied. The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
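		/* Round cur_win up to the scale granularity so that the
		 * value we advertise (after the >> rcv_wscale below) never
		 * moves the right edge of the window to the left.
		 */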
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
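	/* SYN and FIN each consume one unit of sequence space, so advance
	 * end_seq past the flag.
	 */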
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
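		/* Leave 16 bytes (four 32-bit words) of room for the MD5
		 * digest; it is computed and filled in later, once the rest
		 * of the header has been built.
		 */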
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

		*ptr++ = htonl((TCPOPT_EXP << 24) |
			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
			       TCPOPT_FASTOPEN_MAGIC);

		memcpy(ptr, foc->val, foc->len);
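		/* The cookie length may leave the option 2 bytes short of
		 * 32-bit alignment; pad the remainder with NOPs.
		 */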
		if ((foc->len & 3) == 2) {
			u8 *align = ((u8 *)ptr) + foc->len;
			align[0] = align[1] = TCPOPT_NOP;
		}
		ptr += (foc->len + 3) >> 2;
	}
}

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk,
				   struct request_sock *req,
				   unsigned int mss, struct sk_buff *skb,
				   struct tcp_out_options *opts,
				   struct tcp_md5sig_key **md5,
				   struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}


/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small number of skbs per tcp flow in tx queues
 * (qdisc + device) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant being that skb->truesize is subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from an skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
			       0, GFP_ATOMIC);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		sk->sk_prot->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* Keep a ref on socket.
		 * This last ref will be released in tcp_tasklet_func()
		 */
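		/* Subtract all but one byte of truesize so sk_wmem_alloc
		 * stays nonzero and the socket cannot be freed before the
		 * tasklet has run.
		 */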
		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = &__get_cpu_var(tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
	} else {
		sock_wfree(skb);
	}
}

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		const struct sk_buff *fclone = skb + 1;

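		/* If the companion fclone of this skb is still in use, the
		 * original packet is most likely still sitting in a qdisc or
		 * driver queue, so this (re)transmit is probably spurious.
		 */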
		if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
			     fclone->fclone == SKB_FCLONE_CLONE))
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = tcp_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source = inet->inet_sport;
	th->dest = inet->inet_dport;
	th->seq = htonl(tcb->seq);
	th->ack_seq = htonl(tp->rcv_nxt);
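	/* Write the data offset (header length in 32-bit words) and the flag
	 * bits with a single 16-bit store into the header.
	 */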
	*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
				      tcb->tcp_flags);

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window = htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window = htons(tcp_select_window(sk));
	}
	th->check = 0;
	th->urg_ptr = 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
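	/* The urgent pointer is a 16-bit offset, so it can only describe
	 * urgent data that lies within 64K of this segment's sequence number.
	 */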
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}

/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	/* Make sure we own this skb before messing gso_size/gso_segs */
	WARN_ON_ONCE(skb_cloned(skb));

	if (skb->len <= mss_now || !sk_can_gso(sk) ||
	    skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}

/* When a modification to fackets_out becomes necessary, we need to check
 * whether the skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix counters
 */
static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}

/* Function to create two new TCP segments. Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list. This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u8 flags;

	if (WARN_ON(len > skb->len))
		return -EINVAL;

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_unclone(skb, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses when of
	 * skbs, which it never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that the pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = min_t(int, len, skb_headlen(skb));
	if (eat) {
		__skb_pull(skb, eat);
		len -= eat;
		if (!len)
			return;
	}
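	/* Whatever is left to trim must come out of the paged fragments;
	 * keep the surviving frags packed at the front of the array.
	 */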
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161 eat = len;
1162 k = 0;
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001163 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00001164 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1165
1166 if (size <= eat) {
Ian Campbellaff65da2011-08-22 23:44:59 +00001167 skb_frag_unref(skb, i);
Eric Dumazet9e903e02011-10-18 21:00:24 +00001168 eat -= size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 } else {
1170 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1171 if (eat) {
1172 skb_shinfo(skb)->frags[k].page_offset += eat;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001173 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174 eat = 0;
1175 }
1176 k++;
1177 }
1178 }
1179 skb_shinfo(skb)->nr_frags = k;
1180
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001181 skb_reset_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 skb->data_len -= len;
1183 skb->len = skb->data_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184}
1185
Andi Kleen67edfef2009-07-21 23:00:40 +00001186/* Remove acked data from a packet in the transmit queue. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1188{
Pravin B Shelar14bbd6a2013-02-14 09:44:49 +00001189 if (skb_unclone(skb, GFP_ATOMIC))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 return -ENOMEM;
1191
Eric Dumazet4fa48bf2011-12-04 08:51:08 +00001192 __pskb_trim_head(skb, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193
1194 TCP_SKB_CB(skb)->seq += len;
Patrick McHardy84fa7932006-08-29 16:44:56 -07001195 skb->ip_summed = CHECKSUM_PARTIAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196
1197 skb->truesize -= len;
1198 sk->sk_wmem_queued -= len;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001199 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1201
Neal Cardwell5b35e1e2012-01-28 17:29:46 +00001202 /* Any change of skb->len requires recalculation of tso factor. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 if (tcp_skb_pcount(skb) > 1)
Neal Cardwell5b35e1e2012-01-28 17:29:46 +00001204 tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205
1206 return 0;
1207}
1208
Yuchung Cheng1b63edd2013-02-22 08:59:06 +00001209/* Calculate MSS not accounting any TCP options. */
1210static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
John Heffner5d424d52006-03-20 17:53:41 -08001211{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001212 const struct tcp_sock *tp = tcp_sk(sk);
1213 const struct inet_connection_sock *icsk = inet_csk(sk);
John Heffner5d424d52006-03-20 17:53:41 -08001214 int mss_now;
1215
1216 /* Calculate base mss without TCP options:
1217	   This is MMS_S - sizeof(struct tcphdr) from RFC 1122
1218 */
1219 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1220
Eric Dumazet67469602012-04-24 07:37:38 +00001221 /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1222 if (icsk->icsk_af_ops->net_frag_header_len) {
1223 const struct dst_entry *dst = __sk_dst_get(sk);
1224
1225 if (dst && dst_allfrag(dst))
1226 mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1227 }
1228
John Heffner5d424d52006-03-20 17:53:41 -08001229 /* Clamp it (mss_clamp does not include tcp options) */
1230 if (mss_now > tp->rx_opt.mss_clamp)
1231 mss_now = tp->rx_opt.mss_clamp;
1232
1233 /* Now subtract optional transport overhead */
1234 mss_now -= icsk->icsk_ext_hdr_len;
1235
1236 /* Then reserve room for full set of TCP options and 8 bytes of data */
1237 if (mss_now < 48)
1238 mss_now = 48;
John Heffner5d424d52006-03-20 17:53:41 -08001239 return mss_now;
1240}
1241
Yuchung Cheng1b63edd2013-02-22 08:59:06 +00001242/* Calculate MSS. Not accounting for SACKs here. */
1243int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1244{
1245 /* Subtract TCP options size, not including SACKs */
1246 return __tcp_mtu_to_mss(sk, pmtu) -
1247 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
1248}
1249
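/* Editorial sketch (not part of the original source): the arithmetic of
 * __tcp_mtu_to_mss() above for a plain IPv4 path with no IP options, no
 * extension headers and no TCP options.  The 1500-byte path MTU is an
 * assumed example value; the standalone program below only mirrors the
 * subtraction and the 48-byte floor.
 */
#include <stdio.h>

int main(void)
{
	int pmtu = 1500;		/* assumed path MTU */
	int net_header_len = 20;	/* IPv4 header without options */
	int tcp_header_len = 20;	/* bare struct tcphdr */
	int mss = pmtu - net_header_len - tcp_header_len;

	if (mss < 48)			/* same floor as the code above */
		mss = 48;
	printf("base mss = %d\n", mss);	/* prints 1460 */
	return 0;
}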
John Heffner5d424d52006-03-20 17:53:41 -08001250/* Inverse of above */
Eric Dumazet67469602012-04-24 07:37:38 +00001251int tcp_mss_to_mtu(struct sock *sk, int mss)
John Heffner5d424d52006-03-20 17:53:41 -08001252{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001253 const struct tcp_sock *tp = tcp_sk(sk);
1254 const struct inet_connection_sock *icsk = inet_csk(sk);
John Heffner5d424d52006-03-20 17:53:41 -08001255 int mtu;
1256
1257 mtu = mss +
1258 tp->tcp_header_len +
1259 icsk->icsk_ext_hdr_len +
1260 icsk->icsk_af_ops->net_header_len;
1261
Eric Dumazet67469602012-04-24 07:37:38 +00001262 /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1263 if (icsk->icsk_af_ops->net_frag_header_len) {
1264 const struct dst_entry *dst = __sk_dst_get(sk);
1265
1266 if (dst && dst_allfrag(dst))
1267 mtu += icsk->icsk_af_ops->net_frag_header_len;
1268 }
John Heffner5d424d52006-03-20 17:53:41 -08001269 return mtu;
1270}
1271
Andi Kleen67edfef2009-07-21 23:00:40 +00001272/* MTU probing init per socket */
John Heffner5d424d52006-03-20 17:53:41 -08001273void tcp_mtup_init(struct sock *sk)
1274{
1275 struct tcp_sock *tp = tcp_sk(sk);
1276 struct inet_connection_sock *icsk = inet_csk(sk);
1277
1278 icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
1279 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001280 icsk->icsk_af_ops->net_header_len;
John Heffner5d424d52006-03-20 17:53:41 -08001281 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
1282 icsk->icsk_mtup.probe_size = 0;
1283}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001284EXPORT_SYMBOL(tcp_mtup_init);
John Heffner5d424d52006-03-20 17:53:41 -08001285
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286/* This function synchronizes snd mss to the current pmtu/exthdr set.
1287
1288   tp->rx_opt.user_mss is the MSS set by the user via TCP_MAXSEG. It does NOT
1289   account for TCP options, but covers only the bare TCP header.
1290
1291   tp->rx_opt.mss_clamp is the MSS negotiated at connection setup.
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001292   It is the minimum of user_mss and the MSS received with the SYN.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 It also does not include TCP options.
1294
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001295   inet_csk(sk)->icsk_pmtu_cookie is the last PMTU seen by this function.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
1297   tp->mss_cache is the current effective sending MSS, including
1298   all TCP options except SACKs. It is evaluated taking the
1299   current PMTU into account, but never exceeds
1300   tp->rx_opt.mss_clamp.
1301
1302 NOTE1. rfc1122 clearly states that advertised MSS
1303 DOES NOT include either tcp or ip options.
1304
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001305 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1306 are READ ONLY outside this function. --ANK (980731)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1309{
1310 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001311 struct inet_connection_sock *icsk = inet_csk(sk);
John Heffner5d424d52006-03-20 17:53:41 -08001312 int mss_now;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
John Heffner5d424d52006-03-20 17:53:41 -08001314 if (icsk->icsk_mtup.search_high > pmtu)
1315 icsk->icsk_mtup.search_high = pmtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316
John Heffner5d424d52006-03-20 17:53:41 -08001317 mss_now = tcp_mtu_to_mss(sk, pmtu);
Ilpo Järvinen409d22b2007-12-31 14:57:40 -08001318 mss_now = tcp_bound_to_half_wnd(tp, mss_now);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319
1320 /* And store cached results */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001321 icsk->icsk_pmtu_cookie = pmtu;
John Heffner5d424d52006-03-20 17:53:41 -08001322 if (icsk->icsk_mtup.enabled)
1323 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001324 tp->mss_cache = mss_now;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325
1326 return mss_now;
1327}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001328EXPORT_SYMBOL(tcp_sync_mss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329
1330/* Compute the current effective MSS, taking SACKs and IP options,
1331 * and even PMTU discovery events into account.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 */
Ilpo Järvinen0c54b852009-03-14 14:23:05 +00001333unsigned int tcp_current_mss(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001335 const struct tcp_sock *tp = tcp_sk(sk);
1336 const struct dst_entry *dst = __sk_dst_get(sk);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001337 u32 mss_now;
Eric Dumazet95c96172012-04-15 05:58:06 +00001338 unsigned int header_len;
Adam Langley33ad7982008-07-19 00:04:31 -07001339 struct tcp_out_options opts;
1340 struct tcp_md5sig_key *md5;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001342 mss_now = tp->mss_cache;
1343
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 if (dst) {
1345 u32 mtu = dst_mtu(dst);
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001346 if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 mss_now = tcp_sync_mss(sk, mtu);
1348 }
1349
Adam Langley33ad7982008-07-19 00:04:31 -07001350 header_len = tcp_established_options(sk, NULL, &opts, &md5) +
1351 sizeof(struct tcphdr);
1352 /* The mss_cache is sized based on tp->tcp_header_len, which assumes
1353 * some common options. If this is an odd packet (because we have SACK
1354 * blocks etc) then our calculated header_len will be different, and
1355 * we have to adjust mss_now correspondingly */
1356 if (header_len != tp->tcp_header_len) {
1357 int delta = (int) header_len - tp->tcp_header_len;
1358 mss_now -= delta;
1359 }
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001360
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 return mss_now;
1362}
1363
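/* Editorial sketch (not part of the original source): how the header_len
 * adjustment in tcp_current_mss() above shrinks the effective MSS for an
 * "odd" packet.  Assumed example: timestamps in use (12 bytes including
 * padding) plus one SACK block (4 bytes of option base/padding + 8 bytes
 * of edges), on top of an mss_cache of 1448 bytes.
 */
#include <stdio.h>

int main(void)
{
	int mss_cache = 1448;			/* assumed: 1460 minus timestamps */
	int tcp_header_len = 20 + 12;		/* tcphdr + timestamp option */
	int header_len = 20 + 12 + 4 + 8;	/* plus one SACK block */
	int delta = header_len - tcp_header_len;

	printf("mss for this packet = %d\n", mss_cache - delta);	/* 1436 */
	return 0;
}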
David S. Millera762a982005-07-05 15:18:51 -07001364/* Congestion window validation. (RFC2861) */
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001365static void tcp_cwnd_validate(struct sock *sk)
David S. Millera762a982005-07-05 15:18:51 -07001366{
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001367 struct tcp_sock *tp = tcp_sk(sk);
David S. Millera762a982005-07-05 15:18:51 -07001368
Ilpo Järvinend436d682007-12-31 14:58:00 -08001369 if (tp->packets_out >= tp->snd_cwnd) {
David S. Millera762a982005-07-05 15:18:51 -07001370		/* Network is fully fed. */
1371 tp->snd_cwnd_used = 0;
1372 tp->snd_cwnd_stamp = tcp_time_stamp;
1373 } else {
1374 /* Network starves. */
1375 if (tp->packets_out > tp->snd_cwnd_used)
1376 tp->snd_cwnd_used = tp->packets_out;
1377
David S. Miller15d33c02007-04-09 13:23:14 -07001378 if (sysctl_tcp_slow_start_after_idle &&
1379 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
David S. Millera762a982005-07-05 15:18:51 -07001380 tcp_cwnd_application_limited(sk);
1381 }
1382}
1383
Ilpo Järvinen0e3a4802007-12-24 21:33:45 -08001384/* Returns the portion of skb which can be sent right away without
1385 * introducing MSS oddities to segment boundaries. In rare cases where
1386 * mss_now != mss_cache, we ask the caller to create a small skb
1387 * per input skb; this could mostly be avoided here (if desired).
Ilpo Järvinen5ea3a742008-03-11 17:55:27 -07001388 *
1389 * We explicitly want to create a request for splitting write queue tail
1390 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
1391 * thus all the complexity (cwnd_len is always MSS multiple which we
1392 * return whenever allowed by the other factors). Basically we need the
1393 * modulo only when the receiver window alone is the limiting factor or
1394 * when we would be allowed to send the split-due-to-Nagle skb fully.
Ilpo Järvinen0e3a4802007-12-24 21:33:45 -08001395 */
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001396static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
Ben Hutchings14853482012-07-30 16:11:42 +00001397 unsigned int mss_now, unsigned int max_segs)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001398{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001399 const struct tcp_sock *tp = tcp_sk(sk);
Ben Hutchings14853482012-07-30 16:11:42 +00001400 u32 needed, window, max_len;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001401
Ilpo Järvinen90840de2007-12-31 04:48:41 -08001402 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
Ben Hutchings14853482012-07-30 16:11:42 +00001403 max_len = mss_now * max_segs;
Ilpo Järvinen0e3a4802007-12-24 21:33:45 -08001404
Ben Hutchings14853482012-07-30 16:11:42 +00001405 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
1406 return max_len;
Ilpo Järvinen0e3a4802007-12-24 21:33:45 -08001407
Ilpo Järvinen5ea3a742008-03-11 17:55:27 -07001408 needed = min(skb->len, window);
1409
Ben Hutchings14853482012-07-30 16:11:42 +00001410 if (max_len <= needed)
1411 return max_len;
Ilpo Järvinen0e3a4802007-12-24 21:33:45 -08001412
Ilpo Järvinen0e3a4802007-12-24 21:33:45 -08001413 return needed - needed % mss_now;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001414}
1415
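/* Editorial sketch (not part of the original source): the rounding done
 * by tcp_mss_split_point() above when the receive window, rather than
 * max_segs, is the limiting factor.  All values are assumed examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mss_now = 1000;
	unsigned int skb_len = 4500;	/* bytes queued in this skb */
	unsigned int window = 3200;	/* bytes left to the right window edge */
	unsigned int needed = skb_len < window ? skb_len : window;

	/* Send a whole number of segments; the 200-byte tail stays queued. */
	printf("split point = %u bytes\n", needed - needed % mss_now);	/* 3000 */
	return 0;
}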
1416/* Can at least one segment of SKB be sent right now, according to the
1417 * congestion window rules? If so, return how many segments are allowed.
1418 */
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001419static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1420 const struct sk_buff *skb)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001421{
1422 u32 in_flight, cwnd;
1423
1424 /* Don't be strict about the congestion window for the final FIN. */
Eric Dumazet4de075e2011-09-27 13:25:05 -04001425 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
1426 tcp_skb_pcount(skb) == 1)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001427 return 1;
1428
1429 in_flight = tcp_packets_in_flight(tp);
1430 cwnd = tp->snd_cwnd;
1431 if (in_flight < cwnd)
1432 return (cwnd - in_flight);
1433
1434 return 0;
1435}
1436
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001437/* Initialize TSO state of a skb.
Andi Kleen67edfef2009-07-21 23:00:40 +00001438 * This must be invoked the first time we consider transmitting
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001439 * SKB onto the wire.
1440 */
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001441static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001442 unsigned int mss_now)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001443{
1444 int tso_segs = tcp_skb_pcount(skb);
1445
Ilpo Järvinenf8269a42008-12-03 21:24:48 -08001446 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
David S. Miller846998a2005-08-04 19:52:01 -07001447 tcp_set_skb_tso_segs(sk, skb, mss_now);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001448 tso_segs = tcp_skb_pcount(skb);
1449 }
1450 return tso_segs;
1451}
1452
Andi Kleen67edfef2009-07-21 23:00:40 +00001453/* Minshall's variant of the Nagle send check. */
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001454static inline bool tcp_minshall_check(const struct tcp_sock *tp)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001455{
Jianjun Kong09cb1052008-11-03 00:27:11 -08001456 return after(tp->snd_sml, tp->snd_una) &&
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001457 !after(tp->snd_sml, tp->snd_nxt);
1458}
1459
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001460/* Return false if the packet can be sent now without violating Nagle's rules:
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001461 * 1. It is full sized.
1462 * 2. Or it contains FIN. (already checked by caller)
Feng King6d67e9b2011-11-05 04:23:23 +00001463 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001464 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1465 * With Minshall's modification: all sent small packets are ACKed.
1466 */
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001467static inline bool tcp_nagle_check(const struct tcp_sock *tp,
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001468 const struct sk_buff *skb,
Eric Dumazet95c96172012-04-15 05:58:06 +00001469 unsigned int mss_now, int nonagle)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001470{
Eric Dumazeta02cec22010-09-22 20:43:57 +00001471 return skb->len < mss_now &&
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001472 ((nonagle & TCP_NAGLE_CORK) ||
Eric Dumazeta02cec22010-09-22 20:43:57 +00001473 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001474}
1475
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001476/* Return true if the Nagle test allows this packet to be
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001477 * sent now.
1478 */
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001479static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1480 unsigned int cur_mss, int nonagle)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001481{
1482	/* The Nagle rule does not apply to frames that sit in the middle of the
1483	 * write_queue (they have no chance to get new data).
1484 *
1485 * This is implemented in the callers, where they modify the 'nonagle'
1486 * argument based upon the location of SKB in the send queue.
1487 */
1488 if (nonagle & TCP_NAGLE_PUSH)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001489 return true;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001490
Yuchung Cheng9b441902013-03-20 13:32:58 +00001491 /* Don't use the nagle rule for urgent data (or for the final FIN). */
1492 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001493 return true;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001494
1495 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001496 return true;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001497
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001498 return false;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001499}
1500
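/* Editorial sketch (not part of the original source): the Nagle/Minshall
 * decision implemented by tcp_nagle_check()/tcp_nagle_test() above,
 * reduced to plain values.  The FIN/urgent-data and TCP_NAGLE_PUSH
 * exceptions handled by the callers are deliberately left out; the
 * "small_unacked" flag stands in for packets_out && tcp_minshall_check().
 */
#include <stdbool.h>
#include <stdio.h>

static bool nagle_defers(unsigned int len, unsigned int mss,
			 bool nodelay, bool cork, bool small_unacked)
{
	if (len >= mss)			/* full sized: always sendable */
		return false;
	if (cork)			/* TCP_CORK: always held back */
		return true;
	if (nodelay)			/* TCP_NODELAY: never held back */
		return false;
	return small_unacked;		/* Minshall: a small skb is in flight */
}

int main(void)
{
	printf("%d\n", nagle_defers(100, 1460, false, false, true));	/* 1: defer */
	printf("%d\n", nagle_defers(100, 1460, true, false, true));	/* 0: send */
	return 0;
}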
1501/* Does at least the first segment of SKB fit into the send window? */
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001502static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1503 const struct sk_buff *skb,
1504 unsigned int cur_mss)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001505{
1506 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1507
1508 if (skb->len > cur_mss)
1509 end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1510
Ilpo Järvinen90840de2007-12-31 04:48:41 -08001511 return !after(end_seq, tcp_wnd_end(tp));
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001512}
1513
David S. Millerfe067e82007-03-07 12:12:44 -08001514/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001515 * should be put on the wire right now. If so, it returns the number of
1516 * packets allowed by the congestion window.
1517 */
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001518static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001519 unsigned int cur_mss, int nonagle)
1520{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001521 const struct tcp_sock *tp = tcp_sk(sk);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001522 unsigned int cwnd_quota;
1523
David S. Miller846998a2005-08-04 19:52:01 -07001524 tcp_init_tso_segs(sk, skb, cur_mss);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001525
1526 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1527 return 0;
1528
1529 cwnd_quota = tcp_cwnd_test(tp, skb);
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001530 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001531 cwnd_quota = 0;
1532
1533 return cwnd_quota;
1534}
1535
Andi Kleen67edfef2009-07-21 23:00:40 +00001536/* Test if sending is allowed right now. */
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001537bool tcp_may_send_now(struct sock *sk)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001538{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001539 const struct tcp_sock *tp = tcp_sk(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08001540 struct sk_buff *skb = tcp_send_head(sk);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001541
Eric Dumazeta02cec22010-09-22 20:43:57 +00001542 return skb &&
Ilpo Järvinen0c54b852009-03-14 14:23:05 +00001543 tcp_snd_test(sk, skb, tcp_current_mss(sk),
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001544 (tcp_skb_is_last(sk, skb) ?
Eric Dumazeta02cec22010-09-22 20:43:57 +00001545 tp->nonagle : TCP_NAGLE_PUSH));
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001546}
1547
1548/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1549 * which is put after SKB on the list. It is very much like
1550 * tcp_fragment() except that it may make several kinds of assumptions
1551 * in order to speed up the splitting operation. In particular, we
1552 * know that all the data is in scatter-gather pages, and that the
1553 * packet has never been sent out before (and thus is not cloned).
1554 */
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001555static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
Eric Dumazetc4ead4c2010-06-24 01:00:22 +00001556 unsigned int mss_now, gfp_t gfp)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001557{
1558 struct sk_buff *buff;
1559 int nlen = skb->len - len;
Ilpo Järvinen9ce01462009-02-28 04:44:42 +00001560 u8 flags;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001561
1562 /* All of a TSO frame must be composed of paged data. */
Herbert Xuc8ac3772005-08-16 20:43:40 -07001563 if (skb->len != skb->data_len)
1564 return tcp_fragment(sk, skb, len, mss_now);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001565
Eric Dumazetc4ead4c2010-06-24 01:00:22 +00001566 buff = sk_stream_alloc_skb(sk, 0, gfp);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001567 if (unlikely(buff == NULL))
1568 return -ENOMEM;
1569
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001570 sk->sk_wmem_queued += buff->truesize;
1571 sk_mem_charge(sk, buff->truesize);
Herbert Xub60b49e2006-04-19 21:35:00 -07001572 buff->truesize += nlen;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001573 skb->truesize -= nlen;
1574
1575 /* Correct the sequence numbers. */
1576 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1577 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1578 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1579
1580 /* PSH and FIN should only be set in the second packet. */
Eric Dumazet4de075e2011-09-27 13:25:05 -04001581 flags = TCP_SKB_CB(skb)->tcp_flags;
1582 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1583 TCP_SKB_CB(buff)->tcp_flags = flags;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001584
1585 /* This packet was never sent out yet, so no SACK bits. */
1586 TCP_SKB_CB(buff)->sacked = 0;
1587
Patrick McHardy84fa7932006-08-29 16:44:56 -07001588 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001589 skb_split(skb, buff, len);
1590
1591 /* Fix up tso_factor for both original and new SKB. */
David S. Miller846998a2005-08-04 19:52:01 -07001592 tcp_set_skb_tso_segs(sk, skb, mss_now);
1593 tcp_set_skb_tso_segs(sk, buff, mss_now);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001594
1595 /* Link BUFF into the send queue. */
1596 skb_header_release(buff);
David S. Millerfe067e82007-03-07 12:12:44 -08001597 tcp_insert_write_queue_after(skb, buff, sk);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001598
1599 return 0;
1600}
1601
1602/* Try to defer sending, if possible, in order to minimize the amount
1603 * of TSO splitting we do. View it as a kind of TSO Nagle test.
1604 *
1605 * This algorithm is from John Heffner.
1606 */
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001607static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001608{
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001609 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001610 const struct inet_connection_sock *icsk = inet_csk(sk);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001611 u32 send_win, cong_win, limit, in_flight;
Eric Dumazetad9f4f52010-12-07 12:03:55 +00001612 int win_divisor;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001613
Eric Dumazet4de075e2011-09-27 13:25:05 -04001614 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
John Heffnerae8064a2006-10-18 20:36:48 -07001615 goto send_now;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001616
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001617 if (icsk->icsk_ca_state != TCP_CA_Open)
John Heffnerae8064a2006-10-18 20:36:48 -07001618 goto send_now;
1619
1620 /* Defer for less than two clock ticks. */
Ilpo Järvinenbd515c32007-12-20 20:36:03 -08001621 if (tp->tso_deferred &&
Ilpo Järvinena2acde02008-12-05 22:49:37 -08001622 (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
John Heffnerae8064a2006-10-18 20:36:48 -07001623 goto send_now;
David S. Miller908a75c2005-07-05 15:43:58 -07001624
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001625 in_flight = tcp_packets_in_flight(tp);
1626
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001627 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001628
Ilpo Järvinen90840de2007-12-31 04:48:41 -08001629 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001630
1631 /* From in_flight test above, we know that cwnd > in_flight. */
1632 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1633
1634 limit = min(send_win, cong_win);
1635
David S. Millerba244fe2006-03-11 18:51:49 -08001636 /* If a full-sized TSO skb can be sent, do it. */
Ben Hutchings14853482012-07-30 16:11:42 +00001637 if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
Eric Dumazet5e25ba52013-08-27 05:46:32 -07001638 tp->xmit_size_goal_segs * tp->mss_cache))
John Heffnerae8064a2006-10-18 20:36:48 -07001639 goto send_now;
David S. Millerba244fe2006-03-11 18:51:49 -08001640
Ilpo Järvinen62ad2762009-02-28 04:44:29 +00001641 /* Middle in queue won't get any more data, full sendable already? */
1642 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
1643 goto send_now;
1644
Eric Dumazetad9f4f52010-12-07 12:03:55 +00001645 win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1646 if (win_divisor) {
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001647 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1648
1649 /* If at least some fraction of a window is available,
1650 * just use it.
1651 */
Eric Dumazetad9f4f52010-12-07 12:03:55 +00001652 chunk /= win_divisor;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001653 if (limit >= chunk)
John Heffnerae8064a2006-10-18 20:36:48 -07001654 goto send_now;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001655 } else {
1656 /* Different approach, try not to defer past a single
1657 * ACK. Receiver should ACK every other full sized
1658 * frame, so if we have space for more than 3 frames
1659 * then send now.
1660 */
Neal Cardwell6b5a5c02011-11-21 17:15:14 +00001661 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
John Heffnerae8064a2006-10-18 20:36:48 -07001662 goto send_now;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001663 }
1664
Eric Dumazetf4541d62013-03-21 17:36:09 +00001665 /* Ok, it looks like it is advisable to defer.
1666 * Do not rearm the timer if already set to not break TCP ACK clocking.
1667 */
1668 if (!tp->tso_deferred)
1669 tp->tso_deferred = 1 | (jiffies << 1);
John Heffnerae8064a2006-10-18 20:36:48 -07001670
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001671 return true;
John Heffnerae8064a2006-10-18 20:36:48 -07001672
1673send_now:
1674 tp->tso_deferred = 0;
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001675 return false;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001676}
1677
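/* Editorial sketch (not part of the original source): the win_divisor
 * branch of tcp_tso_should_defer() above with assumed example values.
 * If what we may send now covers at least 1/3 (the default divisor) of
 * the smaller of the send window and the congestion window, the skb is
 * sent immediately instead of being deferred for more data.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mss = 1460;
	unsigned int snd_wnd = 64000;			/* assumed peer window */
	unsigned int cwnd = 20, in_flight = 14;		/* assumed, in segments */
	unsigned int send_win = 40000;			/* assumed room to window edge */
	unsigned int cong_win = (cwnd - in_flight) * mss;
	unsigned int limit = send_win < cong_win ? send_win : cong_win;
	unsigned int wnd = snd_wnd < cwnd * mss ? snd_wnd : cwnd * mss;
	unsigned int chunk = wnd / 3;			/* default tso_win_divisor */

	printf("%s\n", limit >= chunk ? "send now" : "defer");	/* prints "defer" */
	return 0;
}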
John Heffner5d424d52006-03-20 17:53:41 -08001678/* Create a new MTU probe if we are ready.
Andi Kleen67edfef2009-07-21 23:00:40 +00001679 * MTU probing regularly attempts to increase the path MTU by
1680 * deliberately sending larger packets. This discovers routing
1681 * changes resulting in larger path MTUs.
1682 *
John Heffner5d424d52006-03-20 17:53:41 -08001683 * Returns 0 if we should wait to probe (no cwnd available),
1684 * 1 if a probe was sent,
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001685 * -1 otherwise
1686 */
John Heffner5d424d52006-03-20 17:53:41 -08001687static int tcp_mtu_probe(struct sock *sk)
1688{
1689 struct tcp_sock *tp = tcp_sk(sk);
1690 struct inet_connection_sock *icsk = inet_csk(sk);
1691 struct sk_buff *skb, *nskb, *next;
1692 int len;
1693 int probe_size;
Ilpo Järvinen91cc17c2007-11-23 19:08:16 +08001694 int size_needed;
John Heffner5d424d52006-03-20 17:53:41 -08001695 int copy;
1696 int mss_now;
1697
1698 /* Not currently probing/verifying,
1699 * not in recovery,
1700 * have enough cwnd, and
1701 * not SACKing (the variable headers throw things off) */
1702 if (!icsk->icsk_mtup.enabled ||
1703 icsk->icsk_mtup.probe_size ||
1704 inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1705 tp->snd_cwnd < 11 ||
Ilpo Järvinencabeccb2009-02-28 04:44:38 +00001706 tp->rx_opt.num_sacks || tp->rx_opt.dsack)
John Heffner5d424d52006-03-20 17:53:41 -08001707 return -1;
1708
1709 /* Very simple search strategy: just double the MSS. */
Ilpo Järvinen0c54b852009-03-14 14:23:05 +00001710 mss_now = tcp_current_mss(sk);
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001711 probe_size = 2 * tp->mss_cache;
Ilpo Järvinen91cc17c2007-11-23 19:08:16 +08001712 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
John Heffner5d424d52006-03-20 17:53:41 -08001713 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1714 /* TODO: set timer for probe_converge_event */
1715 return -1;
1716 }
1717
1718 /* Have enough data in the send queue to probe? */
Ilpo Järvinen7f9c33e2007-11-23 19:10:56 +08001719 if (tp->write_seq - tp->snd_nxt < size_needed)
John Heffner5d424d52006-03-20 17:53:41 -08001720 return -1;
1721
Ilpo Järvinen91cc17c2007-11-23 19:08:16 +08001722 if (tp->snd_wnd < size_needed)
1723 return -1;
Ilpo Järvinen90840de2007-12-31 04:48:41 -08001724 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
Ilpo Järvinen91cc17c2007-11-23 19:08:16 +08001725 return 0;
John Heffner5d424d52006-03-20 17:53:41 -08001726
Ilpo Järvinend67c58e2007-12-02 00:48:01 +02001727 /* Do we need to wait to drain cwnd? With none in flight, don't stall */
1728 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1729 if (!tcp_packets_in_flight(tp))
John Heffner5d424d52006-03-20 17:53:41 -08001730 return -1;
1731 else
1732 return 0;
1733 }
1734
1735 /* We're allowed to probe. Build it now. */
1736 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1737 return -1;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001738 sk->sk_wmem_queued += nskb->truesize;
1739 sk_mem_charge(sk, nskb->truesize);
John Heffner5d424d52006-03-20 17:53:41 -08001740
David S. Millerfe067e82007-03-07 12:12:44 -08001741 skb = tcp_send_head(sk);
John Heffner5d424d52006-03-20 17:53:41 -08001742
1743 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1744 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
Eric Dumazet4de075e2011-09-27 13:25:05 -04001745 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
John Heffner5d424d52006-03-20 17:53:41 -08001746 TCP_SKB_CB(nskb)->sacked = 0;
1747 nskb->csum = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07001748 nskb->ip_summed = skb->ip_summed;
John Heffner5d424d52006-03-20 17:53:41 -08001749
Ilpo Järvinen50c48172007-12-02 00:48:00 +02001750 tcp_insert_write_queue_before(nskb, skb, sk);
1751
John Heffner5d424d52006-03-20 17:53:41 -08001752 len = 0;
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001753 tcp_for_write_queue_from_safe(skb, next, sk) {
John Heffner5d424d52006-03-20 17:53:41 -08001754 copy = min_t(int, skb->len, probe_size - len);
1755 if (nskb->ip_summed)
1756 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1757 else
1758 nskb->csum = skb_copy_and_csum_bits(skb, 0,
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001759 skb_put(nskb, copy),
1760 copy, nskb->csum);
John Heffner5d424d52006-03-20 17:53:41 -08001761
1762 if (skb->len <= copy) {
1763 /* We've eaten all the data from this skb.
1764 * Throw it away. */
Eric Dumazet4de075e2011-09-27 13:25:05 -04001765 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
David S. Millerfe067e82007-03-07 12:12:44 -08001766 tcp_unlink_write_queue(skb, sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001767 sk_wmem_free_skb(sk, skb);
John Heffner5d424d52006-03-20 17:53:41 -08001768 } else {
Eric Dumazet4de075e2011-09-27 13:25:05 -04001769 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
Changli Gaoa3433f32010-06-12 14:01:43 +00001770 ~(TCPHDR_FIN|TCPHDR_PSH);
John Heffner5d424d52006-03-20 17:53:41 -08001771 if (!skb_shinfo(skb)->nr_frags) {
1772 skb_pull(skb, copy);
Patrick McHardy84fa7932006-08-29 16:44:56 -07001773 if (skb->ip_summed != CHECKSUM_PARTIAL)
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001774 skb->csum = csum_partial(skb->data,
1775 skb->len, 0);
John Heffner5d424d52006-03-20 17:53:41 -08001776 } else {
1777 __pskb_trim_head(skb, copy);
1778 tcp_set_skb_tso_segs(sk, skb, mss_now);
1779 }
1780 TCP_SKB_CB(skb)->seq += copy;
1781 }
1782
1783 len += copy;
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001784
1785 if (len >= probe_size)
1786 break;
John Heffner5d424d52006-03-20 17:53:41 -08001787 }
1788 tcp_init_tso_segs(sk, nskb, nskb->len);
1789
1790 /* We're ready to send. If this fails, the probe will
1791 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1792 TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1793 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1794 /* Decrement cwnd here because we are sending
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001795 * effectively two packets. */
John Heffner5d424d52006-03-20 17:53:41 -08001796 tp->snd_cwnd--;
Ilpo Järvinen66f5fe62007-12-31 04:43:57 -08001797 tcp_event_new_data_sent(sk, nskb);
John Heffner5d424d52006-03-20 17:53:41 -08001798
1799 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
John Heffner0e7b1362006-03-20 21:32:58 -08001800 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1801 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
John Heffner5d424d52006-03-20 17:53:41 -08001802
1803 return 1;
1804 }
1805
1806 return -1;
1807}
1808
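/* Editorial sketch (not part of the original source): the sizing used by
 * tcp_mtu_probe() above.  A probe carries two MSS worth of already-queued
 * data, and size_needed bytes must be queued (and fit in the send window)
 * so that the loss of the probe can still be detected.  Values are
 * assumed examples.
 */
#include <stdio.h>

int main(void)
{
	int mss_cache = 1448;		/* assumed current mss */
	int reordering = 3;		/* assumed tp->reordering */
	int probe_size = 2 * mss_cache;
	int size_needed = probe_size + (reordering + 1) * mss_cache;

	printf("probe_size  = %d bytes\n", probe_size);		/* 2896 */
	printf("size_needed = %d bytes\n", size_needed);	/* 8688 */
	return 0;
}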
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809/* This routine writes packets to the network. It advances the
1810 * send_head. This happens as incoming acks open up the remote
1811 * window for us.
1812 *
Ilpo Järvinenf8269a42008-12-03 21:24:48 -08001813 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1814 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1815 * account rare use of URG, this is not a big flaw.
1816 *
Nandita Dukkipati6ba8a3b2013-03-11 10:00:43 +00001817 * Send at most one packet when push_one > 0. Temporarily ignore
1818 * cwnd limit to force at most one packet out when push_one == 2.
1819 *
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001820 * Returns true if no segments are in flight and we have queued segments,
1821 * but cannot send anything now because of SWS or another problem.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 */
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001823static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1824 int push_one, gfp_t gfp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825{
1826 struct tcp_sock *tp = tcp_sk(sk);
David S. Miller92df7b52005-07-05 15:19:06 -07001827 struct sk_buff *skb;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001828 unsigned int tso_segs, sent_pkts;
1829 int cwnd_quota;
John Heffner5d424d52006-03-20 17:53:41 -08001830 int result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831
David S. Miller92df7b52005-07-05 15:19:06 -07001832 sent_pkts = 0;
John Heffner5d424d52006-03-20 17:53:41 -08001833
Ilpo Järvinend5dd9172008-12-05 22:48:55 -08001834 if (!push_one) {
1835 /* Do MTU probing. */
1836 result = tcp_mtu_probe(sk);
1837 if (!result) {
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001838 return false;
Ilpo Järvinend5dd9172008-12-05 22:48:55 -08001839 } else if (result > 0) {
1840 sent_pkts = 1;
1841 }
John Heffner5d424d52006-03-20 17:53:41 -08001842 }
1843
David S. Millerfe067e82007-03-07 12:12:44 -08001844 while ((skb = tcp_send_head(sk))) {
Herbert Xuc8ac3772005-08-16 20:43:40 -07001845 unsigned int limit;
1846
Herbert Xub68e9f82005-08-04 19:52:02 -07001847 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001848 BUG_ON(!tso_segs);
David S. Milleraa934662005-07-05 15:20:09 -07001849
Andrew Vaginec342322012-11-15 04:03:17 +00001850 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
1851 goto repair; /* Skip network transmission */
1852
Herbert Xub68e9f82005-08-04 19:52:02 -07001853 cwnd_quota = tcp_cwnd_test(tp, skb);
Nandita Dukkipati6ba8a3b2013-03-11 10:00:43 +00001854 if (!cwnd_quota) {
1855 if (push_one == 2)
1856 /* Force out a loss probe pkt. */
1857 cwnd_quota = 1;
1858 else
1859 break;
1860 }
Herbert Xub68e9f82005-08-04 19:52:02 -07001861
1862 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1863 break;
1864
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001865 if (tso_segs == 1) {
1866 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1867 (tcp_skb_is_last(sk, skb) ?
1868 nonagle : TCP_NAGLE_PUSH))))
1869 break;
1870 } else {
Ilpo Järvinend5dd9172008-12-05 22:48:55 -08001871 if (!push_one && tcp_tso_should_defer(sk, skb))
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001872 break;
1873 }
David S. Milleraa934662005-07-05 15:20:09 -07001874
Eric Dumazet0ae5f472013-09-27 03:28:54 -07001875 /* TCP Small Queues :
1876		 * Control the number of packets in qdisc/devices to two packets, or ~1 ms worth of data.
1877 * This allows for :
1878 * - better RTT estimation and ACK scheduling
1879 * - faster recovery
1880 * - high rates
Eric Dumazet6ef30bda2013-11-13 06:32:54 -08001881 * Alas, some drivers / subsystems require a fair amount
1882 * of queued bytes to ensure line rate.
1883 * One example is wifi aggregation (802.11 AMPDU)
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001884 */
Eric Dumazet6ef30bda2013-11-13 06:32:54 -08001885 limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
1886 sk->sk_pacing_rate >> 10);
Eric Dumazet0ae5f472013-09-27 03:28:54 -07001887
1888 if (atomic_read(&sk->sk_wmem_alloc) > limit) {
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001889 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
John Ogness94ee16a2014-02-09 18:40:11 -08001890 /* It is possible TX completion already happened
1891 * before we set TSQ_THROTTLED, so we must
1892			 * test the condition again.
1893 * We abuse smp_mb__after_clear_bit() because
1894 * there is no smp_mb__after_set_bit() yet
1895 */
1896 smp_mb__after_clear_bit();
1897 if (atomic_read(&sk->sk_wmem_alloc) > limit)
1898 break;
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001899 }
Eric Dumazet0ae5f472013-09-27 03:28:54 -07001900
Herbert Xuc8ac3772005-08-16 20:43:40 -07001901 limit = mss_now;
Ilpo Järvinenf8269a42008-12-03 21:24:48 -08001902 if (tso_segs > 1 && !tcp_urg_mode(tp))
Ilpo Järvinen0e3a4802007-12-24 21:33:45 -08001903 limit = tcp_mss_split_point(sk, skb, mss_now,
Ben Hutchings14853482012-07-30 16:11:42 +00001904 min_t(unsigned int,
1905 cwnd_quota,
1906 sk->sk_gso_max_segs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907
Herbert Xuc8ac3772005-08-16 20:43:40 -07001908 if (skb->len > limit &&
Eric Dumazetc4ead4c2010-06-24 01:00:22 +00001909 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
Herbert Xuc8ac3772005-08-16 20:43:40 -07001910 break;
1911
David S. Miller92df7b52005-07-05 15:19:06 -07001912 TCP_SKB_CB(skb)->when = tcp_time_stamp;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001913
Ilpo Järvinend5dd9172008-12-05 22:48:55 -08001914 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
David S. Miller92df7b52005-07-05 15:19:06 -07001915 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916
Andrew Vaginec342322012-11-15 04:03:17 +00001917repair:
David S. Miller92df7b52005-07-05 15:19:06 -07001918 /* Advance the send_head. This one is sent out.
1919 * This call will increment packets_out.
1920 */
Ilpo Järvinen66f5fe62007-12-31 04:43:57 -08001921 tcp_event_new_data_sent(sk, skb);
David S. Miller92df7b52005-07-05 15:19:06 -07001922
1923 tcp_minshall_update(tp, mss_now, skb);
Nandita Dukkipatia262f0c2011-08-21 20:21:57 +00001924 sent_pkts += tcp_skb_pcount(skb);
Ilpo Järvinend5dd9172008-12-05 22:48:55 -08001925
1926 if (push_one)
1927 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 }
David S. Miller92df7b52005-07-05 15:19:06 -07001929
David S. Milleraa934662005-07-05 15:20:09 -07001930 if (likely(sent_pkts)) {
Yuchung Cheng684bad12012-09-02 17:38:04 +00001931 if (tcp_in_cwnd_reduction(sk))
1932 tp->prr_out += sent_pkts;
Nandita Dukkipati6ba8a3b2013-03-11 10:00:43 +00001933
1934 /* Send one loss probe per tail loss episode. */
1935 if (push_one != 2)
1936 tcp_schedule_loss_probe(sk);
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001937 tcp_cwnd_validate(sk);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001938 return false;
David S. Miller92df7b52005-07-05 15:19:06 -07001939 }
Nandita Dukkipati6ba8a3b2013-03-11 10:00:43 +00001940 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
1941}
1942
1943bool tcp_schedule_loss_probe(struct sock *sk)
1944{
1945 struct inet_connection_sock *icsk = inet_csk(sk);
1946 struct tcp_sock *tp = tcp_sk(sk);
1947 u32 timeout, tlp_time_stamp, rto_time_stamp;
1948 u32 rtt = tp->srtt >> 3;
1949
1950 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
1951 return false;
1952 /* No consecutive loss probes. */
1953 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
1954 tcp_rearm_rto(sk);
1955 return false;
1956 }
1957 /* Don't do any loss probe on a Fast Open connection before 3WHS
1958 * finishes.
1959 */
1960 if (sk->sk_state == TCP_SYN_RECV)
1961 return false;
1962
1963 /* TLP is only scheduled when next timer event is RTO. */
1964 if (icsk->icsk_pending != ICSK_TIME_RETRANS)
1965 return false;
1966
1967 /* Schedule a loss probe in 2*RTT for SACK capable connections
1968	 * in Open state that are limited either by cwnd or by the application.
1969 */
1970 if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
1971 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
1972 return false;
1973
1974 if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
1975 tcp_send_head(sk))
1976 return false;
1977
1978 /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
1979 * for delayed ack when there's one outstanding packet.
1980 */
1981 timeout = rtt << 1;
1982 if (tp->packets_out == 1)
1983 timeout = max_t(u32, timeout,
1984 (rtt + (rtt >> 1) + TCP_DELACK_MAX));
1985 timeout = max_t(u32, timeout, msecs_to_jiffies(10));
1986
1987 /* If RTO is shorter, just schedule TLP in its place. */
1988 tlp_time_stamp = tcp_time_stamp + timeout;
1989 rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
1990 if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
1991 s32 delta = rto_time_stamp - tcp_time_stamp;
1992 if (delta > 0)
1993 timeout = delta;
1994 }
1995
1996 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
1997 TCP_RTO_MAX);
1998 return true;
1999}
2000
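/* Editorial sketch (not part of the original source): the probe timeout
 * computed by tcp_schedule_loss_probe() above.  PTO is 2*RTT, stretched
 * to 1.5*RTT + TCP_DELACK_MAX when only one packet is outstanding, and
 * never below 10 ms; the kernel additionally caps it at the pending RTO.
 * Times are in milliseconds here purely for illustration (the kernel
 * works in jiffies), and TCP_DELACK_MAX is taken as ~200 ms.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rtt = 40;			/* assumed smoothed RTT, ms */
	unsigned int delack_max = 200;		/* ~TCP_DELACK_MAX in ms */
	unsigned int packets_out = 1;
	unsigned int timeout = rtt << 1;	/* 2 * RTT = 80 ms */

	if (packets_out == 1 && timeout < rtt + (rtt >> 1) + delack_max)
		timeout = rtt + (rtt >> 1) + delack_max;
	if (timeout < 10)
		timeout = 10;
	printf("TLP timeout = %u ms\n", timeout);	/* prints 260 */
	return 0;
}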
2001/* When probe timeout (PTO) fires, send a new segment if one exists, else
2002 * retransmit the last segment.
2003 */
2004void tcp_send_loss_probe(struct sock *sk)
2005{
Nandita Dukkipati9b717a82013-03-11 10:00:44 +00002006 struct tcp_sock *tp = tcp_sk(sk);
Nandita Dukkipati6ba8a3b2013-03-11 10:00:43 +00002007 struct sk_buff *skb;
2008 int pcount;
2009 int mss = tcp_current_mss(sk);
2010 int err = -1;
2011
2012 if (tcp_send_head(sk) != NULL) {
2013 err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2014 goto rearm_timer;
2015 }
2016
Nandita Dukkipati9b717a82013-03-11 10:00:44 +00002017 /* At most one outstanding TLP retransmission. */
2018 if (tp->tlp_high_seq)
2019 goto rearm_timer;
2020
Nandita Dukkipati6ba8a3b2013-03-11 10:00:43 +00002021 /* Retransmit last segment. */
2022 skb = tcp_write_queue_tail(sk);
2023 if (WARN_ON(!skb))
2024 goto rearm_timer;
2025
2026 pcount = tcp_skb_pcount(skb);
2027 if (WARN_ON(!pcount))
2028 goto rearm_timer;
2029
2030 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2031 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
2032 goto rearm_timer;
2033 skb = tcp_write_queue_tail(sk);
2034 }
2035
2036 if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2037 goto rearm_timer;
2038
2039 /* Probe with zero data doesn't trigger fast recovery. */
2040 if (skb->len > 0)
2041 err = __tcp_retransmit_skb(sk, skb);
2042
Nandita Dukkipati9b717a82013-03-11 10:00:44 +00002043 /* Record snd_nxt for loss detection. */
2044 if (likely(!err))
2045 tp->tlp_high_seq = tp->snd_nxt;
2046
Nandita Dukkipati6ba8a3b2013-03-11 10:00:43 +00002047rearm_timer:
2048 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2049 inet_csk(sk)->icsk_rto,
2050 TCP_RTO_MAX);
2051
2052 if (likely(!err))
2053 NET_INC_STATS_BH(sock_net(sk),
2054 LINUX_MIB_TCPLOSSPROBES);
2055 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056}
2057
David S. Millera762a982005-07-05 15:18:51 -07002058/* Push out any pending frames which were held back due to
2059 * TCP_CORK or attempt at coalescing tiny packets.
2060 * The socket must be locked by the caller.
2061 */
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07002062void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
2063 int nonagle)
David S. Millera762a982005-07-05 15:18:51 -07002064{
Ilpo Järvinen726e07a2008-12-05 22:43:56 -08002065 /* If we are closed, the bytes will have to remain here.
2066 * In time closedown will finish, we empty the write queue and
2067 * all will be happy.
2068 */
2069 if (unlikely(sk->sk_state == TCP_CLOSE))
2070 return;
2071
Mel Gorman99a1dec2012-07-31 16:44:14 -07002072 if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2073 sk_gfp_atomic(sk, GFP_ATOMIC)))
Ilpo Järvinen726e07a2008-12-05 22:43:56 -08002074 tcp_check_probe_timer(sk);
David S. Millera762a982005-07-05 15:18:51 -07002075}
2076
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002077/* Send _single_ skb sitting at the send head. This function requires
2078 * true push pending frames to setup probe timer etc.
2079 */
2080void tcp_push_one(struct sock *sk, unsigned int mss_now)
2081{
David S. Millerfe067e82007-03-07 12:12:44 -08002082 struct sk_buff *skb = tcp_send_head(sk);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002083
2084 BUG_ON(!skb || skb->len < mss_now);
2085
Ilpo Järvinend5dd9172008-12-05 22:48:55 -08002086 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002087}
2088
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089/* This function returns the amount that we can raise the
2090 * usable window based on the following constraints
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002091 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 * 1. The window can never be shrunk once it is offered (RFC 793)
2093 * 2. We limit memory per socket
2094 *
2095 * RFC 1122:
2096 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
2097 * RECV.NEXT + RCV.WIN fixed until:
2098 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
2099 *
2100 * i.e. don't raise the right edge of the window until you can raise
2101 * it at least MSS bytes.
2102 *
2103 * Unfortunately, the recommended algorithm breaks header prediction,
2104 * since header prediction assumes th->window stays fixed.
2105 *
2106 * Strictly speaking, keeping th->window fixed violates the receiver
2107 * side SWS prevention criteria. The problem is that under this rule
2108 * a stream of single byte packets will cause the right side of the
2109 * window to always advance by a single byte.
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002110 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 * Of course, if the sender implements sender side SWS prevention
2112 * then this will not be a problem.
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002113 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 * BSD seems to make the following compromise:
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002115 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 * If the free space is less than the 1/4 of the maximum
2117 * space available and the free space is less than 1/2 mss,
2118 * then set the window to 0.
2119 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
2120 * Otherwise, just prevent the window from shrinking
2121 * and from being larger than the largest representable value.
2122 *
2123 * This prevents incremental opening of the window in the regime
2124 * where TCP is limited by the speed of the reader side taking
2125 * data out of the TCP receive queue. It does nothing about
2126 * those cases where the window is constrained on the sender side
2127 * because the pipeline is full.
2128 *
2129 * BSD also seems to "accidentally" limit itself to windows that are a
2130 * multiple of MSS, at least until the free space gets quite small.
2131 * This would appear to be a side effect of the mbuf implementation.
2132 * Combining these two algorithms results in the observed behavior
2133 * of having a fixed window size at almost all times.
2134 *
2135 * Below we obtain similar behavior by forcing the offered window to
2136 * a multiple of the mss when it is feasible to do so.
2137 *
2138 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
2139 * Regular options like TIMESTAMP are taken into account.
2140 */
2141u32 __tcp_select_window(struct sock *sk)
2142{
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002143 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 struct tcp_sock *tp = tcp_sk(sk);
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08002145 /* MSS for the peer's data. Previous versions used mss_clamp
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 * here. I don't know if the value based on our guesses
2147 * of peer's MSS is better for the performance. It's more correct
2148 * but may be worse for the performance because of rcv_mss
2149 * fluctuations. --SAW 1998/11/1
2150 */
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002151 int mss = icsk->icsk_ack.rcv_mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 int free_space = tcp_space(sk);
2153 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
2154 int window;
2155
2156 if (mss > full_space)
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002157 mss = full_space;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
Eric Dumazetb92edbe2007-12-20 21:48:32 -08002159 if (free_space < (full_space >> 1)) {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002160 icsk->icsk_ack.quick = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
Glauber Costa180d8cd2011-12-11 21:47:02 +00002162 if (sk_under_memory_pressure(sk))
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002163 tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2164 4U * tp->advmss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165
2166 if (free_space < mss)
2167 return 0;
2168 }
2169
2170 if (free_space > tp->rcv_ssthresh)
2171 free_space = tp->rcv_ssthresh;
2172
2173 /* Don't do rounding if we are using window scaling, since the
2174 * scaled window will not line up with the MSS boundary anyway.
2175 */
2176 window = tp->rcv_wnd;
2177 if (tp->rx_opt.rcv_wscale) {
2178 window = free_space;
2179
2180 /* Advertise enough space so that it won't get scaled away.
2181		 * Important case: prevent zero window announcement if
2182 * 1<<rcv_wscale > mss.
2183 */
2184 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
2185 window = (((window >> tp->rx_opt.rcv_wscale) + 1)
2186 << tp->rx_opt.rcv_wscale);
2187 } else {
2188 /* Get the largest window that is a nice multiple of mss.
2189 * Window clamp already applied above.
2190 * If our current window offering is within 1 mss of the
2191 * free space we just keep it. This prevents the divide
2192 * and multiply from happening most of the time.
2193 * We also don't do any window rounding when the free space
2194 * is too small.
2195 */
2196 if (window <= free_space - mss || window > free_space)
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002197 window = (free_space / mss) * mss;
John Heffner84565072007-04-02 13:56:32 -07002198 else if (mss == full_space &&
Eric Dumazetb92edbe2007-12-20 21:48:32 -08002199 free_space > window + (full_space >> 1))
John Heffner84565072007-04-02 13:56:32 -07002200 window = free_space;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 }
2202
2203 return window;
2204}
2205
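/* Editorial sketch (not part of the original source): the rounding in the
 * non-window-scaling branch of __tcp_select_window() above.  The offered
 * window only moves when it is more than one MSS away from the free
 * space (or exceeds it), and is then snapped down to a multiple of the
 * MSS; the mss == full_space special case is left out.  Example values
 * are assumed.
 */
#include <stdio.h>

int main(void)
{
	int mss = 1460;
	int free_space = 30000;		/* assumed free receive buffer */
	int window = 29200;		/* currently advertised window */

	if (window <= free_space - mss || window > free_space)
		window = (free_space / mss) * mss;
	printf("advertised window = %d\n", window);	/* stays 29200 */
	return 0;
}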
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002206/* Collapses two adjacent SKB's during retransmission. */
2207static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208{
2209 struct tcp_sock *tp = tcp_sk(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08002210 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002211 int skb_size, next_skb_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002213 skb_size = skb->len;
2214 next_skb_size = next_skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002216 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
Ilpo Järvinena6963a62007-09-25 22:44:14 -07002217
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002218 tcp_highest_sack_combine(sk, next_skb, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002220 tcp_unlink_write_queue(next_skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002222 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
2223 next_skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002225 if (next_skb->ip_summed == CHECKSUM_PARTIAL)
2226 skb->ip_summed = CHECKSUM_PARTIAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002228 if (skb->ip_summed != CHECKSUM_PARTIAL)
2229 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002231 /* Update sequence range on original skb. */
2232 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233
Ilpo Järvinene6c7d082009-02-28 04:44:35 +00002234 /* Merge over control information. This moves PSH/FIN etc. over */
Eric Dumazet4de075e2011-09-27 13:25:05 -04002235 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002237 /* All done, get rid of second SKB and account for it so
2238 * packet counting does not break.
2239 */
2240 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
Ilpo Järvinenb7689202007-09-20 11:37:19 -07002241
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002242 /* changed transmit queue under us so clear hints */
Ilpo Järvinenef9da472008-09-20 21:25:15 -07002243 tcp_clear_retrans_hints_partial(tp);
2244 if (next_skb == tp->retransmit_skb_hint)
2245 tp->retransmit_skb_hint = skb;
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002246
Ilpo Järvinen797108d2009-04-01 23:15:17 +00002247 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2248
Ilpo Järvinen058dc332007-12-31 04:51:11 -08002249 sk_wmem_free_skb(sk, next_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250}
2251
Andi Kleen67edfef2009-07-21 23:00:40 +00002252/* Check if coalescing SKBs is legal. */
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002253static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002254{
2255 if (tcp_skb_pcount(skb) > 1)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002256 return false;
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002257 /* TODO: SACK collapsing could be used to remove this condition */
2258 if (skb_shinfo(skb)->nr_frags != 0)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002259 return false;
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002260 if (skb_cloned(skb))
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002261 return false;
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002262 if (skb == tcp_send_head(sk))
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002263 return false;
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002264	/* Some heuristics for collapsing over SACK'd data could be invented */
2265 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002266 return false;
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002267
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002268 return true;
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002269}
2270
Andi Kleen67edfef2009-07-21 23:00:40 +00002271/* Collapse packets in the retransmit queue to create fewer
2272	 * packets on the wire. This is only done on retransmission.
2273 */
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002274static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2275 int space)
2276{
2277 struct tcp_sock *tp = tcp_sk(sk);
2278 struct sk_buff *skb = to, *tmp;
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002279 bool first = true;
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002280
2281 if (!sysctl_tcp_retrans_collapse)
2282 return;
Eric Dumazet4de075e2011-09-27 13:25:05 -04002283 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002284 return;
2285
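	/* The space budget starts at cur_mss and is charged with each visited
	 * segment's length, so followers are folded into 'to' only while the
	 * merged skb stays within one MSS. For example, with cur_mss = 1460, a
	 * 500-byte head and 300-byte followers, three followers can be merged
	 * (500 + 3*300 = 1400) before the budget goes negative and the loop stops.
	 */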
2286 tcp_for_write_queue_from_safe(skb, tmp, sk) {
2287 if (!tcp_can_collapse(sk, skb))
2288 break;
2289
2290 space -= skb->len;
2291
2292 if (first) {
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002293 first = false;
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002294 continue;
2295 }
2296
2297 if (space < 0)
2298 break;
2299 /* Punt if not enough space exists in the first SKB for
2300 * the data in the second
2301 */
Eric Dumazeta21d4572012-04-10 20:30:48 +00002302 if (skb->len > skb_availroom(to))
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002303 break;
2304
2305 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
2306 break;
2307
2308 tcp_collapse_retrans(sk, to);
2309 }
2310}
2311
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312/* This retransmits one SKB. Policy decisions and retransmit queue
2313 * state updates are done by the caller. Returns non-zero if an
2314 * error occurred which prevented the send.
2315 */
Yuchung Cheng93b174a2012-12-06 08:45:32 +00002316int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317{
2318 struct tcp_sock *tp = tcp_sk(sk);
John Heffner5d424d52006-03-20 17:53:41 -08002319 struct inet_connection_sock *icsk = inet_csk(sk);
Sridhar Samudrala7d227cd22008-05-21 16:42:20 -07002320 unsigned int cur_mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
John Heffner5d424d52006-03-20 17:53:41 -08002322	/* Inconclusive MTU probe */
2323 if (icsk->icsk_mtup.probe_size) {
2324 icsk->icsk_mtup.probe_size = 0;
2325 }
2326
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327	/* Do not send more than we queued. 1/4 is reserved for possible
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08002328 * copying overhead: fragmentation, tunneling, mangling etc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 */
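	/* For example, with sk_wmem_queued at 64KB and a larger sk_sndbuf, up to
	 * 80KB (64KB plus a 16KB overhead allowance) of allocated skb memory is
	 * tolerated before the retransmit is deferred with -EAGAIN.
	 */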
2330 if (atomic_read(&sk->sk_wmem_alloc) >
2331 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2332 return -EAGAIN;
2333
2334 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2335 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2336 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2338 return -ENOMEM;
2339 }
2340
Sridhar Samudrala7d227cd22008-05-21 16:42:20 -07002341 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
2342 return -EHOSTUNREACH; /* Routing failure or similar. */
2343
Ilpo Järvinen0c54b852009-03-14 14:23:05 +00002344 cur_mss = tcp_current_mss(sk);
Sridhar Samudrala7d227cd22008-05-21 16:42:20 -07002345
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346	/* If the receiver has shrunk its window, and skb is out of
2347	 * the new window, do not retransmit it. The exception is
2348	 * when the window is shrunk to zero; in that case
2349	 * our retransmit serves as a zero window probe.
2350 */
Joe Perches9d4fb272009-11-23 10:41:23 -08002351 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
2352 TCP_SKB_CB(skb)->seq != tp->snd_una)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353 return -EAGAIN;
2354
2355 if (skb->len > cur_mss) {
David S. Miller846998a2005-08-04 19:52:01 -07002356 if (tcp_fragment(sk, skb, cur_mss, cur_mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 return -ENOMEM; /* We'll try again later. */
Ilpo Järvinen02276f32009-02-28 04:44:31 +00002358 } else {
Ilpo Järvinen9eb93622009-04-01 23:18:20 +00002359 int oldpcount = tcp_skb_pcount(skb);
2360
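		/* The skb may have been segmented with a smaller MSS than the
		 * current one, in which case it now needs fewer TSO segments.
		 * Changing its geometry requires private skb metadata
		 * (skb_unclone), and packets_out accounting is then corrected
		 * by the difference via tcp_adjust_pcount().
		 */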
2361 if (unlikely(oldpcount > 1)) {
Eric Dumazetb81908e2013-10-15 11:54:30 -07002362 if (skb_unclone(skb, GFP_ATOMIC))
2363 return -ENOMEM;
Ilpo Järvinen9eb93622009-04-01 23:18:20 +00002364 tcp_init_tso_segs(sk, skb, cur_mss);
2365 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2366 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 }
2368
Ilpo Järvinen4a17fc32008-11-24 21:03:43 -08002369 tcp_retrans_try_collapse(sk, skb, cur_mss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 /* Some Solaris stacks overoptimize and ignore the FIN on a
2372 * retransmit when old data is attached. So strip it off
2373 * since it is cheap to do so and saves bytes on the network.
2374 */
Stephen Hemminger2de979b2007-03-08 20:45:19 -08002375 if (skb->len > 0 &&
Eric Dumazet4de075e2011-09-27 13:25:05 -04002376 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
Stephen Hemminger2de979b2007-03-08 20:45:19 -08002377 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 if (!pskb_trim(skb, 0)) {
Ilpo Järvinene870a8e2008-01-03 20:39:01 -08002379 /* Reuse, even though it does some unnecessary work */
2380 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
Eric Dumazet4de075e2011-09-27 13:25:05 -04002381 TCP_SKB_CB(skb)->tcp_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 skb->ip_summed = CHECKSUM_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 }
2384 }
2385
2386 /* Make a copy, if the first transmission SKB clone we made
2387 * is still in somebody's hands, else make a clone.
2388 */
2389 TCP_SKB_CB(skb)->when = tcp_time_stamp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
Thomas Graf50bceae2013-04-11 10:57:18 +00002391 /* make sure skb->data is aligned on arches that require it
2392 * and check if ack-trimming & collapsing extended the headroom
2393 * beyond what csum_start can cover.
2394 */
2395 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
2396 skb_headroom(skb) >= 0xFFFF)) {
Eric Dumazet117632e2011-12-03 21:39:53 +00002397 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2398 GFP_ATOMIC);
Yuchung Cheng93b174a2012-12-06 08:45:32 +00002399 return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2400 -ENOBUFS;
Eric Dumazet117632e2011-12-03 21:39:53 +00002401 } else {
Yuchung Cheng93b174a2012-12-06 08:45:32 +00002402 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
Eric Dumazet117632e2011-12-03 21:39:53 +00002403 }
Yuchung Cheng93b174a2012-12-06 08:45:32 +00002404}
2405
2406int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2407{
2408 struct tcp_sock *tp = tcp_sk(sk);
2409 int err = __tcp_retransmit_skb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410
2411 if (err == 0) {
2412 /* Update global TCP statistics. */
Pavel Emelyanov81cc8a72008-07-16 20:22:04 -07002413 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414
2415 tp->total_retrans++;
2416
2417#if FASTRETRANS_DEBUG > 0
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002418 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
Joe Perchese87cc472012-05-13 21:56:26 +00002419 net_dbg_ratelimited("retrans_out leaked\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 }
2421#endif
Ilpo Järvinenb08d6cb2007-10-11 17:36:13 -07002422 if (!tp->retrans_out)
2423 tp->lost_retrans_low = tp->snd_nxt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
2425 tp->retrans_out += tcp_skb_pcount(skb);
2426
2427 /* Save stamp of the first retransmit. */
2428 if (!tp->retrans_stamp)
2429 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2430
Yuchung Chengc24f6912011-02-07 12:57:04 +00002431 tp->undo_retrans += tcp_skb_pcount(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432
2433 /* snd_nxt is stored to detect loss of retransmitted segment,
2434 * see tcp_input.c tcp_sacktag_write_queue().
2435 */
2436 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
2437 }
2438 return err;
2439}
2440
Andi Kleen67edfef2009-07-21 23:00:40 +00002441/* Check whether forward retransmits are possible in the current
2442 * window/congestion state.
2443 */
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002444static bool tcp_can_forward_retransmit(struct sock *sk)
Ilpo Järvinenb5afe7b2008-09-20 21:21:54 -07002445{
2446 const struct inet_connection_sock *icsk = inet_csk(sk);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04002447 const struct tcp_sock *tp = tcp_sk(sk);
Ilpo Järvinenb5afe7b2008-09-20 21:21:54 -07002448
2449 /* Forward retransmissions are possible only during Recovery. */
2450 if (icsk->icsk_ca_state != TCP_CA_Recovery)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002451 return false;
Ilpo Järvinenb5afe7b2008-09-20 21:21:54 -07002452
2453 /* No forward retransmissions in Reno are possible. */
2454 if (tcp_is_reno(tp))
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002455 return false;
Ilpo Järvinenb5afe7b2008-09-20 21:21:54 -07002456
2457	/* Yeah, we have to make a difficult choice between forward transmission
2458	 * and retransmission... Both ways have their merits...
2459	 *
2460	 * For now we do not retransmit anything while we still have new
2461 * segments to send. In the other cases, follow rule 3 for
2462 * NextSeg() specified in RFC3517.
2463 */
2464
2465 if (tcp_may_send_now(sk))
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002466 return false;
Ilpo Järvinenb5afe7b2008-09-20 21:21:54 -07002467
Eric Dumazeta2a385d2012-05-16 23:15:34 +00002468 return true;
Ilpo Järvinenb5afe7b2008-09-20 21:21:54 -07002469}
2470
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471/* This gets called after a retransmit timeout, and the initially
2472 * retransmitted data is acknowledged. It tries to continue
2473 * resending the rest of the retransmit queue, until either
2474 * we've sent it all or the congestion window limit is reached.
2475 * If doing SACK, the first ACK which comes back for a timeout
2476 * based retransmit packet might feed us FACK information again.
2477	 * If so, we use it to avoid unnecessary retransmissions.
2478 */
2479void tcp_xmit_retransmit_queue(struct sock *sk)
2480{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002481 const struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 struct tcp_sock *tp = tcp_sk(sk);
2483 struct sk_buff *skb;
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002484 struct sk_buff *hole = NULL;
Ilpo Järvinen618d9f22008-09-20 21:26:22 -07002485 u32 last_lost;
Ilpo Järvinen61eb55f2008-09-20 21:22:59 -07002486 int mib_idx;
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002487 int fwd_rexmitting = 0;
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08002488
Ilpo Järvinen45e77d32010-07-19 01:16:18 +00002489 if (!tp->packets_out)
2490 return;
2491
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002492 if (!tp->lost_out)
2493 tp->retransmit_high = tp->snd_una;
2494
Ilpo Järvinen618d9f22008-09-20 21:26:22 -07002495 if (tp->retransmit_skb_hint) {
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08002496 skb = tp->retransmit_skb_hint;
Ilpo Järvinen618d9f22008-09-20 21:26:22 -07002497 last_lost = TCP_SKB_CB(skb)->end_seq;
2498 if (after(last_lost, tp->retransmit_high))
2499 last_lost = tp->retransmit_high;
2500 } else {
David S. Millerfe067e82007-03-07 12:12:44 -08002501 skb = tcp_write_queue_head(sk);
Ilpo Järvinen618d9f22008-09-20 21:26:22 -07002502 last_lost = tp->snd_una;
2503 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504
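	/* The walk below retransmits segments already marked lost (counted as
	 * fast or slow-start retransmits), remembering the first un-SACKed,
	 * un-retransmitted skb as a hole to backtrack to. Past retransmit_high
	 * it may switch to forward retransmissions, up to the highest SACKed
	 * sequence, when tcp_can_forward_retransmit() allows, and it stops as
	 * soon as the congestion window is full.
	 */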
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002505 tcp_for_write_queue_from(skb, sk) {
2506 __u8 sacked = TCP_SKB_CB(skb)->sacked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002508 if (skb == tcp_send_head(sk))
2509 break;
2510 /* we could do better than to assign each time */
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002511 if (hole == NULL)
2512 tp->retransmit_skb_hint = skb;
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08002513
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002514 /* Assume this retransmit will generate
2515 * only one packet for congestion window
2516 * calculation purposes. This works because
2517 * tcp_retransmit_skb() will chop up the
2518 * packet to be MSS sized and all the
2519 * packet counting works out.
2520 */
2521 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2522 return;
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002523
2524 if (fwd_rexmitting) {
2525begin_fwd:
2526 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2527 break;
2528 mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2529
2530 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
Ilpo Järvinen618d9f22008-09-20 21:26:22 -07002531 tp->retransmit_high = last_lost;
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002532 if (!tcp_can_forward_retransmit(sk))
2533 break;
2534 /* Backtrack if necessary to non-L'ed skb */
2535 if (hole != NULL) {
2536 skb = hole;
2537 hole = NULL;
2538 }
2539 fwd_rexmitting = 1;
2540 goto begin_fwd;
2541
2542 } else if (!(sacked & TCPCB_LOST)) {
Ilpo Järvinenac11ba72009-02-28 04:44:27 +00002543 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002544 hole = skb;
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002545 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002547 } else {
Ilpo Järvinen618d9f22008-09-20 21:26:22 -07002548 last_lost = TCP_SKB_CB(skb)->end_seq;
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002549 if (icsk->icsk_ca_state != TCP_CA_Loss)
2550 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2551 else
2552 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2553 }
2554
2555 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002556 continue;
Pavel Emelyanov40b215e2008-07-03 01:05:41 -07002557
Eric Dumazet09e9b812012-01-25 04:44:20 +00002558 if (tcp_retransmit_skb(sk, skb)) {
2559 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002560 return;
Eric Dumazet09e9b812012-01-25 04:44:20 +00002561 }
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002562 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2563
Yuchung Cheng684bad12012-09-02 17:38:04 +00002564 if (tcp_in_cwnd_reduction(sk))
Nandita Dukkipatia262f0c2011-08-21 20:21:57 +00002565 tp->prr_out += tcp_skb_pcount(skb);
2566
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002567 if (skb == tcp_write_queue_head(sk))
2568 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2569 inet_csk(sk)->icsk_rto,
2570 TCP_RTO_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572}
2573
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574/* Send a fin. The caller locks the socket for us. This cannot be
2575 * allowed to fail queueing a FIN frame under any circumstances.
2576 */
2577void tcp_send_fin(struct sock *sk)
2578{
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002579 struct tcp_sock *tp = tcp_sk(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08002580 struct sk_buff *skb = tcp_write_queue_tail(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 int mss_now;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002582
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583 /* Optimization, tack on the FIN if we have a queue of
2584 * unsent frames. But be careful about outgoing SACKS
2585 * and IP options.
2586 */
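	/* Either way the FIN consumes one sequence number: the piggyback path
	 * bumps end_seq/write_seq directly, while for a freshly allocated skb
	 * tcp_queue_skb() advances write_seq.
	 */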
Ilpo Järvinen0c54b852009-03-14 14:23:05 +00002587 mss_now = tcp_current_mss(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588
David S. Millerfe067e82007-03-07 12:12:44 -08002589 if (tcp_send_head(sk) != NULL) {
Eric Dumazet4de075e2011-09-27 13:25:05 -04002590 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 TCP_SKB_CB(skb)->end_seq++;
2592 tp->write_seq++;
2593 } else {
2594 /* Socket is locked, keep trying until memory is available. */
2595 for (;;) {
Wu Fengguangaa133072009-09-02 23:45:45 -07002596 skb = alloc_skb_fclone(MAX_TCP_HEADER,
2597 sk->sk_allocation);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 if (skb)
2599 break;
2600 yield();
2601 }
2602
2603 /* Reserve space for headers and prepare control bits. */
2604 skb_reserve(skb, MAX_TCP_HEADER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
Ilpo Järvinene870a8e2008-01-03 20:39:01 -08002606 tcp_init_nondata_skb(skb, tp->write_seq,
Changli Gaoa3433f32010-06-12 14:01:43 +00002607 TCPHDR_ACK | TCPHDR_FIN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 tcp_queue_skb(sk, skb);
2609 }
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07002610 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611}
2612
2613/* We get here when a process closes a file descriptor (either due to
2614 * an explicit close() or as a byproduct of exit()'ing) and there
2615 * was unread data in the receive queue. This behavior is recommended
Gerrit Renker65bb7232007-04-28 21:21:46 -07002616 * by RFC 2525, section 2.17. -DaveM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617 */
Al Virodd0fc662005-10-07 07:46:04 +01002618void tcp_send_active_reset(struct sock *sk, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 struct sk_buff *skb;
2621
2622 /* NOTE: No TCP options attached and we never retransmit this. */
2623 skb = alloc_skb(MAX_TCP_HEADER, priority);
2624 if (!skb) {
Pavel Emelyanov4e673442008-07-16 20:30:14 -07002625 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 return;
2627 }
2628
2629 /* Reserve space for headers and prepare control bits. */
2630 skb_reserve(skb, MAX_TCP_HEADER);
Ilpo Järvinene870a8e2008-01-03 20:39:01 -08002631 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
Changli Gaoa3433f32010-06-12 14:01:43 +00002632 TCPHDR_ACK | TCPHDR_RST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 /* Send it off. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 TCP_SKB_CB(skb)->when = tcp_time_stamp;
David S. Millerdfb4b9d2005-12-06 16:24:52 -08002635 if (tcp_transmit_skb(sk, skb, 0, priority))
Pavel Emelyanov4e673442008-07-16 20:30:14 -07002636 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
Sridhar Samudrala26af65c2008-06-04 15:19:35 -07002637
Pavel Emelyanov81cc8a72008-07-16 20:22:04 -07002638 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639}
2640
Andi Kleen67edfef2009-07-21 23:00:40 +00002641/* Send a crossed SYN-ACK during socket establishment.
2642 * WARNING: This routine must only be called when we have already sent
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 * a SYN packet that crossed the incoming SYN that caused this routine
2644 * to get called. If this assumption fails then the initial rcv_wnd
2645 * and rcv_wscale values will not be correct.
2646 */
2647int tcp_send_synack(struct sock *sk)
2648{
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002649 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650
David S. Millerfe067e82007-03-07 12:12:44 -08002651 skb = tcp_write_queue_head(sk);
Eric Dumazet4de075e2011-09-27 13:25:05 -04002652 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
Joe Perches91df42b2012-05-15 14:11:54 +00002653 pr_debug("%s: wrong queue state\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 return -EFAULT;
2655 }
Eric Dumazet4de075e2011-09-27 13:25:05 -04002656 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 if (skb_cloned(skb)) {
2658 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2659 if (nskb == NULL)
2660 return -ENOMEM;
David S. Millerfe067e82007-03-07 12:12:44 -08002661 tcp_unlink_write_queue(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 skb_header_release(nskb);
David S. Millerfe067e82007-03-07 12:12:44 -08002663 __tcp_add_write_queue_head(sk, nskb);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002664 sk_wmem_free_skb(sk, skb);
2665 sk->sk_wmem_queued += nskb->truesize;
2666 sk_mem_charge(sk, nskb->truesize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 skb = nskb;
2668 }
2669
Eric Dumazet4de075e2011-09-27 13:25:05 -04002670 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 TCP_ECN_send_synack(tcp_sk(sk), skb);
2672 }
2673 TCP_SKB_CB(skb)->when = tcp_time_stamp;
David S. Millerdfb4b9d2005-12-06 16:24:52 -08002674 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675}
2676
Eric Dumazet4aea39c2012-06-03 20:33:21 +00002677/**
2678 * tcp_make_synack - Prepare a SYN-ACK.
2679	 * @sk: listener socket
2680	 * @dst: dst entry attached to the SYNACK
2681	 * @req: request_sock pointer
Eric Dumazet4aea39c2012-06-03 20:33:21 +00002682 *
2683 * Allocate one skb and build a SYNACK packet.
2684 * @dst is consumed : Caller should not use it again.
2685 */
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002686struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
William Allen Simpsone6b4d112009-12-02 18:07:39 +00002687 struct request_sock *req,
Jerry Chu83368862012-08-31 12:29:12 +00002688 struct tcp_fastopen_cookie *foc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689{
William Allen Simpsonbd0388a2009-12-02 18:23:05 +00002690 struct tcp_out_options opts;
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002691 struct inet_request_sock *ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 struct tcp_sock *tp = tcp_sk(sk);
2693 struct tcphdr *th;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694 struct sk_buff *skb;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002695 struct tcp_md5sig_key *md5;
William Allen Simpsonbd0388a2009-12-02 18:23:05 +00002696 int tcp_header_size;
Tom Quetchenbachf5fff5d2008-09-21 00:21:51 -07002697 int mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698
Phil Oesterb70a23a2013-08-27 16:41:40 -07002699 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
Eric Dumazet4aea39c2012-06-03 20:33:21 +00002700 if (unlikely(!skb)) {
2701 dst_release(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 return NULL;
Eric Dumazet4aea39c2012-06-03 20:33:21 +00002703 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 /* Reserve space for headers. */
2705 skb_reserve(skb, MAX_TCP_HEADER);
2706
Eric Dumazet4aea39c2012-06-03 20:33:21 +00002707 skb_dst_set(skb, dst);
Eric Dumazetca10b9e2013-04-08 17:58:11 +00002708 security_skb_owned_by(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709
David S. Miller0dbaee32010-12-13 12:52:14 -08002710 mss = dst_metric_advmss(dst);
Tom Quetchenbachf5fff5d2008-09-21 00:21:51 -07002711 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2712 mss = tp->rx_opt.user_mss;
2713
Adam Langley33ad7982008-07-19 00:04:31 -07002714 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2715 __u8 rcv_wscale;
2716 /* Set this up on the first call only */
2717 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
Hagen Paul Pfeifere88c64f2010-08-19 06:33:05 +00002718
2719	/* limit the window selection if the user enforces a smaller rx buffer */
2720 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2721 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2722 req->window_clamp = tcp_full_space(sk);
2723
Adam Langley33ad7982008-07-19 00:04:31 -07002724 /* tcp_full_space because it is guaranteed to be the first packet */
2725 tcp_select_initial_window(tcp_full_space(sk),
Tom Quetchenbachf5fff5d2008-09-21 00:21:51 -07002726 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
Adam Langley33ad7982008-07-19 00:04:31 -07002727 &req->rcv_wnd,
2728 &req->window_clamp,
2729 ireq->wscale_ok,
laurent chavey31d12922009-12-15 11:15:28 +00002730 &rcv_wscale,
2731 dst_metric(dst, RTAX_INITRWND));
Adam Langley33ad7982008-07-19 00:04:31 -07002732 ireq->rcv_wscale = rcv_wscale;
2733 }
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002734
Adam Langley33ad7982008-07-19 00:04:31 -07002735 memset(&opts, 0, sizeof(opts));
Florian Westphal8b5f12d2008-10-26 23:10:12 -07002736#ifdef CONFIG_SYN_COOKIES
2737 if (unlikely(req->cookie_ts))
2738 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2739 else
2740#endif
Adam Langley33ad7982008-07-19 00:04:31 -07002741 TCP_SKB_CB(skb)->when = tcp_time_stamp;
Christoph Paasch1a2c6182013-03-17 08:23:34 +00002742 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
2743 foc) + sizeof(*th);
Adam Langley33ad7982008-07-19 00:04:31 -07002744
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002745 skb_push(skb, tcp_header_size);
2746 skb_reset_transport_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002748 th = tcp_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 memset(th, 0, sizeof(struct tcphdr));
2750 th->syn = 1;
2751 th->ack = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 TCP_ECN_make_synack(req, th);
KOVACS Krisztiana3116ac52008-10-01 07:46:49 -07002753 th->source = ireq->loc_port;
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002754 th->dest = ireq->rmt_port;
Ilpo Järvinene870a8e2008-01-03 20:39:01 -08002755	/* Setting of flags is superfluous here for callers (and ECE is
2756 * not even correctly set)
2757 */
2758 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
Changli Gaoa3433f32010-06-12 14:01:43 +00002759 TCPHDR_SYN | TCPHDR_ACK);
William Allen Simpson4957faade2009-12-02 18:25:27 +00002760
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 th->seq = htonl(TCP_SKB_CB(skb)->seq);
Jerry Chu83368862012-08-31 12:29:12 +00002762 /* XXX data is queued and acked as is. No buffer/window check */
2763 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
2765 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
Ilpo Järvinen600ff0c2007-02-13 12:42:11 -08002766 th->window = htons(min(req->rcv_wnd, 65535U));
William Allen Simpsonbd0388a2009-12-02 18:23:05 +00002767 tcp_options_write((__be32 *)(th + 1), tp, &opts);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 th->doff = (tcp_header_size >> 2);
Tom Herbertaa2ea052010-04-22 07:00:24 +00002769 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002770
2771#ifdef CONFIG_TCP_MD5SIG
2772 /* Okay, we have all we need - do the md5 hash if needed */
2773 if (md5) {
William Allen Simpsonbd0388a2009-12-02 18:23:05 +00002774 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
Adam Langley49a72df2008-07-19 00:01:42 -07002775 md5, NULL, req, skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002776 }
2777#endif
2778
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 return skb;
2780}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00002781EXPORT_SYMBOL(tcp_make_synack);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782
Andi Kleen67edfef2009-07-21 23:00:40 +00002783/* Do all connect socket setups that can be done AF independent. */
Pavel Emelyanov370816a2012-04-19 03:40:01 +00002784void tcp_connect_init(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04002786 const struct dst_entry *dst = __sk_dst_get(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 struct tcp_sock *tp = tcp_sk(sk);
2788 __u8 rcv_wscale;
2789
2790 /* We'll fix this up when we get a response from the other end.
2791 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2792 */
2793 tp->tcp_header_len = sizeof(struct tcphdr) +
David S. Millerbb5b7c12009-12-15 20:56:42 -08002794 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002796#ifdef CONFIG_TCP_MD5SIG
2797 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2798 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2799#endif
2800
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 /* If user gave his TCP_MAXSEG, record it to clamp */
2802 if (tp->rx_opt.user_mss)
2803 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2804 tp->max_window = 0;
John Heffner5d424d52006-03-20 17:53:41 -08002805 tcp_mtup_init(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 tcp_sync_mss(sk, dst_mtu(dst));
2807
2808 if (!tp->window_clamp)
2809 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
David S. Miller0dbaee32010-12-13 12:52:14 -08002810 tp->advmss = dst_metric_advmss(dst);
Tom Quetchenbachf5fff5d2008-09-21 00:21:51 -07002811 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2812 tp->advmss = tp->rx_opt.user_mss;
2813
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 tcp_initialize_rcv_mss(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815
Hagen Paul Pfeifere88c64f2010-08-19 06:33:05 +00002816	/* limit the window selection if the user enforces a smaller rx buffer */
2817 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2818 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2819 tp->window_clamp = tcp_full_space(sk);
2820
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 tcp_select_initial_window(tcp_full_space(sk),
2822 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2823 &tp->rcv_wnd,
2824 &tp->window_clamp,
David S. Millerbb5b7c12009-12-15 20:56:42 -08002825 sysctl_tcp_window_scaling,
laurent chavey31d12922009-12-15 11:15:28 +00002826 &rcv_wscale,
2827 dst_metric(dst, RTAX_INITRWND));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828
2829 tp->rx_opt.rcv_wscale = rcv_wscale;
2830 tp->rcv_ssthresh = tp->rcv_wnd;
2831
2832 sk->sk_err = 0;
2833 sock_reset_flag(sk, SOCK_DONE);
2834 tp->snd_wnd = 0;
Hantzis Fotisee7537b2009-03-02 22:42:02 -08002835 tcp_init_wl(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 tp->snd_una = tp->write_seq;
2837 tp->snd_sml = tp->write_seq;
Ilpo Järvinen33f5f572008-10-07 14:43:06 -07002838 tp->snd_up = tp->write_seq;
Pavel Emelyanov370816a2012-04-19 03:40:01 +00002839 tp->snd_nxt = tp->write_seq;
Pavel Emelyanovee995282012-04-19 03:40:39 +00002840
2841 if (likely(!tp->repair))
2842 tp->rcv_nxt = 0;
Andrew Vagin6f198dc2013-08-27 12:20:40 +04002843 else
2844 tp->rcv_tstamp = tcp_time_stamp;
Pavel Emelyanovee995282012-04-19 03:40:39 +00002845 tp->rcv_wup = tp->rcv_nxt;
2846 tp->copied_seq = tp->rcv_nxt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002848 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2849 inet_csk(sk)->icsk_retransmits = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 tcp_clear_retrans(tp);
2851}
2852
Yuchung Cheng783237e2012-07-19 06:43:07 +00002853static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
2854{
2855 struct tcp_sock *tp = tcp_sk(sk);
2856 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2857
2858 tcb->end_seq += skb->len;
2859 skb_header_release(skb);
2860 __tcp_add_write_queue_tail(sk, skb);
2861 sk->sk_wmem_queued += skb->truesize;
2862 sk_mem_charge(sk, skb->truesize);
2863 tp->write_seq = tcb->end_seq;
2864 tp->packets_out += tcp_skb_pcount(skb);
2865}
2866
2867/* Build and send a SYN with data and (cached) Fast Open cookie. However,
2868 * queue a data-only packet after the regular SYN, such that regular SYNs
2869 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
2870 * only the SYN sequence, the data are retransmitted in the first ACK.
2871 * If the cookie is not cached or another error occurs, fall back to sending a
2872 * regular SYN with Fast Open cookie request option.
2873 */
2874static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2875{
2876 struct tcp_sock *tp = tcp_sk(sk);
2877 struct tcp_fastopen_request *fo = tp->fastopen_req;
Yuchung Chengaab48742012-07-19 06:43:10 +00002878 int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
Yuchung Cheng783237e2012-07-19 06:43:07 +00002879 struct sk_buff *syn_data = NULL, *data;
Yuchung Chengaab48742012-07-19 06:43:10 +00002880 unsigned long last_syn_loss = 0;
Yuchung Cheng783237e2012-07-19 06:43:07 +00002881
Yuchung Cheng67da22d2012-07-19 06:43:11 +00002882 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
Yuchung Chengaab48742012-07-19 06:43:10 +00002883 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
2884 &syn_loss, &last_syn_loss);
2885 /* Recurring FO SYN losses: revert to regular handshake temporarily */
2886 if (syn_loss > 1 &&
2887 time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
2888 fo->cookie.len = -1;
2889 goto fallback;
2890 }
2891
Yuchung Cheng67da22d2012-07-19 06:43:11 +00002892 if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
2893 fo->cookie.len = -1;
2894 else if (fo->cookie.len <= 0)
Yuchung Cheng783237e2012-07-19 06:43:07 +00002895 goto fallback;
2896
2897 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and
2898 * user-MSS. Reserve maximum option space for middleboxes that add
2899 * private TCP options. The cost is reduced data space in SYN :(
2900 */
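	/* Roughly, over IPv4 with a 1500-byte path MTU and no extra header
	 * options this leaves about 1460 - 40 = 1420 bytes of SYN payload,
	 * further bounded below by the amount of pending data and by what fits
	 * in an order-0 skb allocation.
	 */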
2901 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
2902 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
Yuchung Cheng1b63edd2013-02-22 08:59:06 +00002903 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
Yuchung Cheng783237e2012-07-19 06:43:07 +00002904 MAX_TCP_OPTION_SPACE;
2905
Eric Dumazetfe42b172014-02-20 10:09:18 -08002906 space = min_t(size_t, space, fo->size);
2907
2908 /* limit to order-0 allocations */
2909 space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
2910
2911 syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
Yuchung Cheng783237e2012-07-19 06:43:07 +00002912 sk->sk_allocation);
2913 if (syn_data == NULL)
2914 goto fallback;
2915
2916 for (i = 0; i < iovlen && syn_data->len < space; ++i) {
2917 struct iovec *iov = &fo->data->msg_iov[i];
2918 unsigned char __user *from = iov->iov_base;
2919 int len = iov->iov_len;
2920
2921 if (syn_data->len + len > space)
2922 len = space - syn_data->len;
2923 else if (i + 1 == iovlen)
2924 /* No more data pending in inet_wait_for_connect() */
2925 fo->data = NULL;
2926
2927 if (skb_add_data(syn_data, from, len))
2928 goto fallback;
2929 }
2930
2931 /* Queue a data-only packet after the regular SYN for retransmission */
2932 data = pskb_copy(syn_data, sk->sk_allocation);
2933 if (data == NULL)
2934 goto fallback;
2935 TCP_SKB_CB(data)->seq++;
2936 TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
2937 TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
2938 tcp_connect_queue_skb(sk, data);
2939 fo->copied = data->len;
2940
2941 if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
Yuchung Cheng67da22d2012-07-19 06:43:11 +00002942 tp->syn_data = (fo->copied > 0);
Yuchung Cheng783237e2012-07-19 06:43:07 +00002943 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
2944 goto done;
2945 }
2946 syn_data = NULL;
2947
2948fallback:
2949 /* Send a regular SYN with Fast Open cookie request option */
2950 if (fo->cookie.len > 0)
2951 fo->cookie.len = 0;
2952 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
2953 if (err)
2954 tp->syn_fastopen = 0;
2955 kfree_skb(syn_data);
2956done:
2957 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
2958 return err;
2959}
2960
Andi Kleen67edfef2009-07-21 23:00:40 +00002961/* Build a SYN and send it off. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962int tcp_connect(struct sock *sk)
2963{
2964 struct tcp_sock *tp = tcp_sk(sk);
2965 struct sk_buff *buff;
Eric Parisee586812010-11-16 11:52:49 +00002966 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967
2968 tcp_connect_init(sk);
2969
Andrey Vagin2b916472012-11-22 01:13:58 +00002970 if (unlikely(tp->repair)) {
2971 tcp_finish_connect(sk, NULL);
2972 return 0;
2973 }
2974
David S. Millerd179cd12005-08-17 14:57:30 -07002975 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976 if (unlikely(buff == NULL))
2977 return -ENOBUFS;
2978
2979 /* Reserve space for headers. */
2980 skb_reserve(buff, MAX_TCP_HEADER);
2981
Changli Gaoa3433f32010-06-12 14:01:43 +00002982 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
Yuchung Cheng783237e2012-07-19 06:43:07 +00002983 tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
2984 tcp_connect_queue_skb(sk, buff);
Ilpo Järvinene870a8e2008-01-03 20:39:01 -08002985 TCP_ECN_send_syn(sk, buff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986
Yuchung Cheng783237e2012-07-19 06:43:07 +00002987 /* Send off SYN; include data in Fast Open. */
2988 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
2989 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
Eric Parisee586812010-11-16 11:52:49 +00002990 if (err == -ECONNREFUSED)
2991 return err;
Wei Yongjunbd37a082006-08-07 21:04:15 -07002992
2993 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2994	 * so that this packet gets counted in tcpOutSegs.
2995 */
2996 tp->snd_nxt = tp->write_seq;
2997 tp->pushed_seq = tp->write_seq;
Pavel Emelyanov81cc8a72008-07-16 20:22:04 -07002998 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999
3000 /* Timer for repeating the SYN until an answer. */
Arnaldo Carvalho de Melo3f421ba2005-08-09 20:11:08 -07003001 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3002 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003 return 0;
3004}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00003005EXPORT_SYMBOL(tcp_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006
3007/* Send out a delayed ack; the caller does the policy checking
3008 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
3009 * for details.
3010 */
3011void tcp_send_delayed_ack(struct sock *sk)
3012{
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003013 struct inet_connection_sock *icsk = inet_csk(sk);
3014 int ato = icsk->icsk_ack.ato;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015 unsigned long timeout;
3016
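	/* The delay is clamped below: at most HZ/2, tightened to TCP_DELACK_MAX
	 * in interactive (pingpong) mode or when an ACK was already pushed, and
	 * further bounded by the smoothed RTT (floored at TCP_DELACK_MIN).
	 * Assuming HZ=1000 those bounds are 500ms, 200ms and 40ms respectively,
	 * so e.g. a 30ms smoothed RTT clamps the delayed ACK to about 40ms.
	 */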
3017 if (ato > TCP_DELACK_MIN) {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003018 const struct tcp_sock *tp = tcp_sk(sk);
Ilpo Järvinen056834d2007-12-31 14:57:14 -08003019 int max_ato = HZ / 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020
Ilpo Järvinen056834d2007-12-31 14:57:14 -08003021 if (icsk->icsk_ack.pingpong ||
3022 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023 max_ato = TCP_DELACK_MAX;
3024
3025 /* Slow path, intersegment interval is "high". */
3026
3027 /* If some rtt estimate is known, use it to bound delayed ack.
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003028 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029 * directly.
3030 */
3031 if (tp->srtt) {
Ilpo Järvinen056834d2007-12-31 14:57:14 -08003032 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033
3034 if (rtt < max_ato)
3035 max_ato = rtt;
3036 }
3037
3038 ato = min(ato, max_ato);
3039 }
3040
3041 /* Stay within the limit we were given */
3042 timeout = jiffies + ato;
3043
3044	/* Use new timeout only if there wasn't an older one earlier. */
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003045 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046 /* If delack timer was blocked or is about to expire,
3047 * send ACK now.
3048 */
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003049 if (icsk->icsk_ack.blocked ||
3050 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 tcp_send_ack(sk);
3052 return;
3053 }
3054
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003055 if (!time_before(timeout, icsk->icsk_ack.timeout))
3056 timeout = icsk->icsk_ack.timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057 }
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003058 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3059 icsk->icsk_ack.timeout = timeout;
3060 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003061}
3062
3063/* This routine sends an ack and also updates the window. */
3064void tcp_send_ack(struct sock *sk)
3065{
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003066 struct sk_buff *buff;
3067
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068 /* If we have been reset, we may not send again. */
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003069 if (sk->sk_state == TCP_CLOSE)
3070 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003072 /* We are not putting this on the write queue, so
3073 * tcp_transmit_skb() will set the ownership to this
3074 * sock.
3075 */
Mel Gorman99a1dec2012-07-31 16:44:14 -07003076 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003077 if (buff == NULL) {
3078 inet_csk_schedule_ack(sk);
3079 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
3080 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
3081 TCP_DELACK_MAX, TCP_RTO_MAX);
3082 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 }
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003084
3085 /* Reserve space for headers and prepare control bits. */
3086 skb_reserve(buff, MAX_TCP_HEADER);
Changli Gaoa3433f32010-06-12 14:01:43 +00003087 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003088
3089 /* Send it off, this clears delayed acks for us. */
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003090 TCP_SKB_CB(buff)->when = tcp_time_stamp;
Mel Gorman99a1dec2012-07-31 16:44:14 -07003091 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092}
3093
3094/* This routine sends a packet with an out of date sequence
3095 * number. It assumes the other end will try to ack it.
3096 *
3097 * Question: what should we do in urgent mode?
3098 * 4.4BSD forces sending a single byte of data. We cannot send
3099 * out of window data, because we have SND.NXT==SND.MAX...
3100 *
3101 * Current solution: to send TWO zero-length segments in urgent mode:
3102 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
3103 * out-of-date with SND.UNA-1 to probe window.
3104 */
3105static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
3106{
3107 struct tcp_sock *tp = tcp_sk(sk);
3108 struct sk_buff *skb;
3109
3110 /* We don't queue it, tcp_transmit_skb() sets ownership. */
Mel Gorman99a1dec2012-07-31 16:44:14 -07003111 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09003112 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113 return -1;
3114
3115 /* Reserve space for headers and set control bits. */
3116 skb_reserve(skb, MAX_TCP_HEADER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117 /* Use a previous sequence. This should cause the other
3118 * end to send an ack. Don't queue or clone SKB, just
3119 * send it.
3120 */
Changli Gaoa3433f32010-06-12 14:01:43 +00003121 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122 TCP_SKB_CB(skb)->when = tcp_time_stamp;
David S. Millerdfb4b9d2005-12-06 16:24:52 -08003123 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124}
3125
Pavel Emelyanovee995282012-04-19 03:40:39 +00003126void tcp_send_window_probe(struct sock *sk)
3127{
3128 if (sk->sk_state == TCP_ESTABLISHED) {
3129 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3130 tcp_xmit_probe_skb(sk, 0);
3131 }
3132}
3133
Andi Kleen67edfef2009-07-21 23:00:40 +00003134/* Initiate keepalive or window probe from timer. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135int tcp_write_wakeup(struct sock *sk)
3136{
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003137 struct tcp_sock *tp = tcp_sk(sk);
3138 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003140 if (sk->sk_state == TCP_CLOSE)
3141 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003143 if ((skb = tcp_send_head(sk)) != NULL &&
3144 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
3145 int err;
Ilpo Järvinen0c54b852009-03-14 14:23:05 +00003146 unsigned int mss = tcp_current_mss(sk);
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003147 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003149 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
3150 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003152 /* We are probing the opening of a window
3153	 * but the window size is != 0, so this
3154	 * must have been a result of SWS avoidance (sender side)
3155 */
3156 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
3157 skb->len > mss) {
3158 seg_size = min(seg_size, mss);
Eric Dumazet4de075e2011-09-27 13:25:05 -04003159 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003160 if (tcp_fragment(sk, skb, seg_size, mss))
3161 return -1;
3162 } else if (!tcp_skb_pcount(skb))
3163 tcp_set_skb_tso_segs(sk, skb, mss);
3164
Eric Dumazet4de075e2011-09-27 13:25:05 -04003165 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003166 TCP_SKB_CB(skb)->when = tcp_time_stamp;
3167 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3168 if (!err)
3169 tcp_event_new_data_sent(sk, skb);
3170 return err;
3171 } else {
Ilpo Järvinen33f5f572008-10-07 14:43:06 -07003172 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
Ilpo Järvinen058dc332007-12-31 04:51:11 -08003173 tcp_xmit_probe_skb(sk, 1);
3174 return tcp_xmit_probe_skb(sk, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176}
3177
3178/* A window probe timeout has occurred. If the window is not closed, send
3179 * a partial packet, else a zero probe.
3180 */
3181void tcp_send_probe0(struct sock *sk)
3182{
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003183 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184 struct tcp_sock *tp = tcp_sk(sk);
3185 int err;
3186
3187 err = tcp_write_wakeup(sk);
3188
David S. Millerfe067e82007-03-07 12:12:44 -08003189 if (tp->packets_out || !tcp_send_head(sk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190 /* Cancel probe timer, if it is not required. */
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03003191 icsk->icsk_probes_out = 0;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003192 icsk->icsk_backoff = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193 return;
3194 }
3195
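	/* On each unanswered probe the retry interval below doubles
	 * (icsk_rto << icsk_backoff), capped at TCP_RTO_MAX (120s); e.g. with a
	 * 200ms RTO the probes go out after roughly 400ms, 800ms, 1.6s, and so
	 * on. Local congestion (err > 0) leaves the backoff untouched.
	 */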
3196 if (err <= 0) {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003197 if (icsk->icsk_backoff < sysctl_tcp_retries2)
3198 icsk->icsk_backoff++;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03003199 icsk->icsk_probes_out++;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09003200 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
Arnaldo Carvalho de Melo3f421ba2005-08-09 20:11:08 -07003201 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
3202 TCP_RTO_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 } else {
3204 /* If packet was not sent due to local congestion,
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03003205 * do not backoff and do not remember icsk_probes_out.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206 * Let local senders to fight for local resources.
3207 *
3208 * Use accumulated backoff yet.
3209 */
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03003210 if (!icsk->icsk_probes_out)
3211 icsk->icsk_probes_out = 1;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09003212 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07003213 min(icsk->icsk_rto << icsk->icsk_backoff,
Arnaldo Carvalho de Melo3f421ba2005-08-09 20:11:08 -07003214 TCP_RESOURCE_PROBE_INTERVAL),
3215 TCP_RTO_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216 }
3217}