TOMOYO Linux Cross Reference
Linux/net/ipv4/tcp_input.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * INET         An implementation of the TCP/IP protocol suite for the LINUX
  4  *              operating system.  INET is implemented using the  BSD Socket
  5  *              interface as the means of communication with the user level.
  6  *
  7  *              Implementation of the Transmission Control Protocol(TCP).
  8  *
  9  * Authors:     Ross Biro
 10  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11  *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 12  *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 13  *              Florian La Roche, <flla@stud.uni-sb.de>
 14  *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 15  *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 16  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 17  *              Matthew Dillon, <dillon@apollo.west.oic.com>
 18  *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 19  *              Jorge Cwik, <jorge@laser.satlink.net>
 20  */
 21 
 22 /*
 23  * Changes:
 24  *              Pedro Roque     :       Fast Retransmit/Recovery.
 25  *                                      Two receive queues.
 26  *                                      Retransmit queue handled by TCP.
 27  *                                      Better retransmit timer handling.
 28  *                                      New congestion avoidance.
 29  *                                      Header prediction.
 30  *                                      Variable renaming.
 31  *
 32  *              Eric            :       Fast Retransmit.
 33  *              Randy Scott     :       MSS option defines.
 34  *              Eric Schenk     :       Fixes to slow start algorithm.
 35  *              Eric Schenk     :       Yet another double ACK bug.
 36  *              Eric Schenk     :       Delayed ACK bug fixes.
 37  *              Eric Schenk     :       Floyd style fast retrans war avoidance.
 38  *              David S. Miller :       Don't allow zero congestion window.
 39  *              Eric Schenk     :       Fix retransmitter so that it sends
 40  *                                      next packet on ack of previous packet.
 41  *              Andi Kleen      :       Moved open_request checking here
 42  *                                      and process RSTs for open_requests.
 43  *              Andi Kleen      :       Better prune_queue, and other fixes.
 44  *              Andrey Savochkin:       Fix RTT measurements in the presence of
 45  *                                      timestamps.
 46  *              Andrey Savochkin:       Check sequence numbers correctly when
 47  *                                      removing SACKs due to in sequence incoming
 48  *                                      data segments.
 49  *              Andi Kleen:             Make sure we never ack data there is not
 50  *                                      enough room for. Also make this condition
 51  *                                      a fatal error if it might still happen.
 52  *              Andi Kleen:             Add tcp_measure_rcv_mss to make
 53  *                                      connections with MSS<min(MTU,ann. MSS)
 54  *                                      work without delayed acks.
 55  *              Andi Kleen:             Process packets with PSH set in the
 56  *                                      fast path.
 57  *              J Hadi Salim:           ECN support
 58  *              Andrei Gurtov,
 59  *              Pasi Sarolahti,
 60  *              Panu Kuhlberg:          Experimental audit of TCP (re)transmission
 61  *                                      engine. Lots of bugs are found.
 62  *              Pasi Sarolahti:         F-RTO for dealing with spurious RTOs
 63  */
 64 
 65 #define pr_fmt(fmt) "TCP: " fmt
 66 
 67 #include <linux/mm.h>
 68 #include <linux/slab.h>
 69 #include <linux/module.h>
 70 #include <linux/sysctl.h>
 71 #include <linux/kernel.h>
 72 #include <linux/prefetch.h>
 73 #include <net/dst.h>
 74 #include <net/tcp.h>
 75 #include <net/inet_common.h>
 76 #include <linux/ipsec.h>
 77 #include <asm/unaligned.h>
 78 #include <linux/errqueue.h>
 79 #include <trace/events/tcp.h>
 80 #include <linux/static_key.h>
 81 
 82 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 83 
 84 #define FLAG_DATA               0x01 /* Incoming frame contained data.          */
 85 #define FLAG_WIN_UPDATE         0x02 /* Incoming ACK was a window update.       */
 86 #define FLAG_DATA_ACKED         0x04 /* This ACK acknowledged new data.         */
 87 #define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted.  */
 88 #define FLAG_SYN_ACKED          0x10 /* This ACK acknowledged SYN.              */
 89 #define FLAG_DATA_SACKED        0x20 /* New SACK.                               */
 90 #define FLAG_ECE                0x40 /* ECE in this ACK                         */
 91 #define FLAG_LOST_RETRANS       0x80 /* This ACK marks some retransmission lost */
 92 #define FLAG_SLOWPATH           0x100 /* Do not skip RFC checks for window update.*/
 93 #define FLAG_ORIG_SACK_ACKED    0x200 /* Never retransmitted data are (s)acked  */
 94 #define FLAG_SND_UNA_ADVANCED   0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 95 #define FLAG_DSACKING_ACK       0x800 /* SACK blocks contained D-SACK info */
 96 #define FLAG_SET_XMIT_TIMER     0x1000 /* Set TLP or RTO timer */
 97 #define FLAG_SACK_RENEGING      0x2000 /* snd_una advanced to a sacked seq */
 98 #define FLAG_UPDATE_TS_RECENT   0x4000 /* tcp_replace_ts_recent() */
 99 #define FLAG_NO_CHALLENGE_ACK   0x8000 /* do not call tcp_send_challenge_ack()  */
100 #define FLAG_ACK_MAYBE_DELAYED  0x10000 /* Likely a delayed ACK */
101 
102 #define FLAG_ACKED              (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
103 #define FLAG_NOT_DUP            (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
104 #define FLAG_CA_ALERT           (FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
105 #define FLAG_FORWARD_PROGRESS   (FLAG_ACKED|FLAG_DATA_SACKED)
106 
107 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
108 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
109 
110 #define REXMIT_NONE     0 /* no loss recovery to do */
111 #define REXMIT_LOST     1 /* retransmit packets marked lost */
112 #define REXMIT_NEW      2 /* FRTO-style transmit of unsent/new packets */
113 
114 #if IS_ENABLED(CONFIG_TLS_DEVICE)
115 static DEFINE_STATIC_KEY_FALSE(clean_acked_data_enabled);
116 
117 void clean_acked_data_enable(struct inet_connection_sock *icsk,
118                              void (*cad)(struct sock *sk, u32 ack_seq))
119 {
120         icsk->icsk_clean_acked = cad;
121         static_branch_inc(&clean_acked_data_enabled);
122 }
123 EXPORT_SYMBOL_GPL(clean_acked_data_enable);
124 
125 void clean_acked_data_disable(struct inet_connection_sock *icsk)
126 {
127         static_branch_dec(&clean_acked_data_enabled);
128         icsk->icsk_clean_acked = NULL;
129 }
130 EXPORT_SYMBOL_GPL(clean_acked_data_disable);
131 #endif
132 
133 static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
134                              unsigned int len)
135 {
136         static bool __once __read_mostly;
137 
138         if (!__once) {
139                 struct net_device *dev;
140 
141                 __once = true;
142 
143                 rcu_read_lock();
144                 dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
145                 if (!dev || len >= dev->mtu)
146                         pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
147                                 dev ? dev->name : "Unknown driver");
148                 rcu_read_unlock();
149         }
150 }
151 
152 /* Adapt the MSS value used to make delayed ack decision to the
153  * real world.
154  */
155 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
156 {
157         struct inet_connection_sock *icsk = inet_csk(sk);
158         const unsigned int lss = icsk->icsk_ack.last_seg_size;
159         unsigned int len;
160 
161         icsk->icsk_ack.last_seg_size = 0;
162 
163         /* skb->len may jitter because of SACKs, even if peer
164          * sends good full-sized frames.
165          */
166         len = skb_shinfo(skb)->gso_size ? : skb->len;
167         if (len >= icsk->icsk_ack.rcv_mss) {
168                 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
169                                                tcp_sk(sk)->advmss);
170                 /* Account for possibly-removed options */
171                 if (unlikely(len > icsk->icsk_ack.rcv_mss +
172                                    MAX_TCP_OPTION_SPACE))
173                         tcp_gro_dev_warn(sk, skb, len);
174         } else {
175                 /* Otherwise, we make a more careful check, taking into
176                  * account that the SACK block size is variable.
177                  *
178                  * "len" is invariant segment length, including TCP header.
179                  */
180                 len += skb->data - skb_transport_header(skb);
181                 if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
182                     /* If PSH is not set, packet should be
183                      * full sized, provided peer TCP is not badly broken.
184                      * This observation (if it is correct 8)) allows us
185                      * to handle super-low mtu links fairly.
186                      */
187                     (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
188                      !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
189                         /* Subtract also invariant (if peer is RFC compliant),
190                          * tcp header plus fixed timestamp option length.
191                          * Resulting "len" is MSS free of SACK jitter.
192                          */
193                         len -= tcp_sk(sk)->tcp_header_len;
194                         icsk->icsk_ack.last_seg_size = len;
195                         if (len == lss) {
196                                 icsk->icsk_ack.rcv_mss = len;
197                                 return;
198                         }
199                 }
200                 if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
201                         icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
202                 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
203         }
204 }
205 
206 static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
207 {
208         struct inet_connection_sock *icsk = inet_csk(sk);
209         unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
210 
211         if (quickacks == 0)
212                 quickacks = 2;
213         quickacks = min(quickacks, max_quickacks);
214         if (quickacks > icsk->icsk_ack.quick)
215                 icsk->icsk_ack.quick = quickacks;
216 }
217 
218 void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
219 {
220         struct inet_connection_sock *icsk = inet_csk(sk);
221 
222         tcp_incr_quickack(sk, max_quickacks);
223         icsk->icsk_ack.pingpong = 0;
224         icsk->icsk_ack.ato = TCP_ATO_MIN;
225 }
226 EXPORT_SYMBOL(tcp_enter_quickack_mode);
227 
228 /* Send ACKs quickly, if "quick" count is not exhausted
229  * and the session is not interactive.
230  */
231 
232 static bool tcp_in_quickack_mode(struct sock *sk)
233 {
234         const struct inet_connection_sock *icsk = inet_csk(sk);
235         const struct dst_entry *dst = __sk_dst_get(sk);
236 
237         return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
238                 (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
239 }
240 
241 static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
242 {
243         if (tp->ecn_flags & TCP_ECN_OK)
244                 tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
245 }
246 
247 static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
248 {
249         if (tcp_hdr(skb)->cwr) {
250                 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
251 
252                 /* If the sender is telling us it has entered CWR, then its
253                  * cwnd may be very low (even just 1 packet), so we should ACK
254                  * immediately.
255                  */
256                 tcp_enter_quickack_mode((struct sock *)tp, 2);
257         }
258 }
259 
260 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
261 {
262         tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
263 }
264 
265 static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
266 {
267         struct tcp_sock *tp = tcp_sk(sk);
268 
269         switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
270         case INET_ECN_NOT_ECT:
271                 /* Funny extension: if ECT is not set on a segment,
272                  * and we have already seen ECT on a previous segment,
273                  * it is probably a retransmit.
274                  */
275                 if (tp->ecn_flags & TCP_ECN_SEEN)
276                         tcp_enter_quickack_mode(sk, 2);
277                 break;
278         case INET_ECN_CE:
279                 if (tcp_ca_needs_ecn(sk))
280                         tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
281 
282                 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
283                         /* Better not delay acks, sender can have a very low cwnd */
284                         tcp_enter_quickack_mode(sk, 2);
285                         tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
286                 }
287                 tp->ecn_flags |= TCP_ECN_SEEN;
288                 break;
289         default:
290                 if (tcp_ca_needs_ecn(sk))
291                         tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
292                 tp->ecn_flags |= TCP_ECN_SEEN;
293                 break;
294         }
295 }
296 
297 static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
298 {
299         if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
300                 __tcp_ecn_check_ce(sk, skb);
301 }
302 
303 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
304 {
305         if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
306                 tp->ecn_flags &= ~TCP_ECN_OK;
307 }
308 
309 static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
310 {
311         if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
312                 tp->ecn_flags &= ~TCP_ECN_OK;
313 }
314 
315 static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
316 {
317         if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
318                 return true;
319         return false;
320 }
321 
322 /* Buffer size and advertised window tuning.
323  *
324  * 1. Tuning sk->sk_sndbuf, when connection enters established state.
325  */
326 
327 static void tcp_sndbuf_expand(struct sock *sk)
328 {
329         const struct tcp_sock *tp = tcp_sk(sk);
330         const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
331         int sndmem, per_mss;
332         u32 nr_segs;
333 
334         /* Worst case is non GSO/TSO : each frame consumes one skb
335          * and skb->head is kmalloced using a power-of-two area of memory
336          */
337         per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
338                   MAX_TCP_HEADER +
339                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
340 
341         per_mss = roundup_pow_of_two(per_mss) +
342                   SKB_DATA_ALIGN(sizeof(struct sk_buff));
343 
344         nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
345         nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
346 
347         /* Fast Recovery (RFC 5681 3.2) :
348          * Cubic needs 1.7 factor, rounded to 2 to include
349          * extra cushion (application might react slowly to EPOLLOUT)
350          */
351         sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
352         sndmem *= nr_segs * per_mss;
353 
354         if (sk->sk_sndbuf < sndmem)
355                 sk->sk_sndbuf = min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]);
356 }
357 
358 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
359  *
360  * All of tcp_full_space() is split into two parts: the "network" buffer,
361  * allocated forward and advertised in the receiver window (tp->rcv_wnd),
362  * and the "application buffer", required to isolate scheduling/application
363  * latencies from the network.
364  * window_clamp is the maximal advertised window. It can be less than
365  * tcp_full_space(); in this case tcp_full_space() - window_clamp
366  * is reserved for the "application" buffer. The smaller window_clamp is,
367  * the smoother our behaviour from the network's viewpoint, but the lower
368  * the throughput and the higher the sensitivity of the connection to losses. 8)
369  *
370  * rcv_ssthresh is a stricter window_clamp used during the "slow start"
371  * phase to predict the further behaviour of this connection.
372  * It is used for two goals:
373  * - to enforce header prediction at the sender, even when the application
374  *   requires some significant "application buffer". This is check #1.
375  * - to prevent pruning of the receive queue because of misprediction
376  *   of the receiver window. Check #2.
377  *
378  * The scheme does not work when the sender sends good segments opening
379  * the window and then starts to feed us spaghetti. But it should work
380  * in common situations. Otherwise, we have to rely on queue collapsing.
381  */
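
The split described above can be distilled into a small sketch of the clamp applied later in tcp_init_buffer_space(); this is an illustrative userspace rendering, assuming maxwin is tcp_full_space(sk) and app_win is sysctl_tcp_app_win, not an exact copy of the kernel function:

/* Pick window_clamp so that maxwin - window_clamp remains available
 * as the "application buffer"; at minimum one segment stays reserved.
 */
static unsigned int clamp_window(unsigned int window_clamp, unsigned int maxwin,
                                 unsigned int advmss, unsigned int app_win)
{
        if (window_clamp >= maxwin) {
                window_clamp = maxwin;
                if (app_win && maxwin > 4 * advmss) {
                        unsigned int w = maxwin - (maxwin >> app_win);

                        window_clamp = w > 4 * advmss ? w : 4 * advmss;
                }
        }
        /* Force reservation of one segment. */
        if (app_win && window_clamp > 2 * advmss &&
            window_clamp + advmss > maxwin) {
                unsigned int w = maxwin - advmss;

                window_clamp = w > 2 * advmss ? w : 2 * advmss;
        }
        return window_clamp;
}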
382 
383 /* Slow part of check#2. */
384 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
385 {
386         struct tcp_sock *tp = tcp_sk(sk);
387         /* Optimize this! */
388         int truesize = tcp_win_from_space(sk, skb->truesize) >> 1;
389         int window = tcp_win_from_space(sk, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
390 
391         while (tp->rcv_ssthresh <= window) {
392                 if (truesize <= skb->len)
393                         return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
394 
395                 truesize >>= 1;
396                 window >>= 1;
397         }
398         return 0;
399 }
400 
401 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
402 {
403         struct tcp_sock *tp = tcp_sk(sk);
404 
405         /* Check #1 */
406         if (tp->rcv_ssthresh < tp->window_clamp &&
407             (int)tp->rcv_ssthresh < tcp_space(sk) &&
408             !tcp_under_memory_pressure(sk)) {
409                 int incr;
410 
411                 /* Check #2. Increase the window if an skb with such
412                  * overhead will fit into rcvbuf in the future.
413                  */
414                 if (tcp_win_from_space(sk, skb->truesize) <= skb->len)
415                         incr = 2 * tp->advmss;
416                 else
417                         incr = __tcp_grow_window(sk, skb);
418 
419                 if (incr) {
420                         incr = max_t(int, incr, 2 * skb->len);
421                         tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
422                                                tp->window_clamp);
423                         inet_csk(sk)->icsk_ack.quick |= 1;
424                 }
425         }
426 }
427 
428 /* 3. Tuning rcvbuf, when connection enters established state. */
429 static void tcp_fixup_rcvbuf(struct sock *sk)
430 {
431         u32 mss = tcp_sk(sk)->advmss;
432         int rcvmem;
433 
434         rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
435                  tcp_default_init_rwnd(mss);
436 
437         /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency
438          * Allow enough cushion so that sender is not limited by our window
439          */
440         if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)
441                 rcvmem <<= 2;
442 
443         if (sk->sk_rcvbuf < rcvmem)
444                 sk->sk_rcvbuf = min(rcvmem, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
445 }
446 
447 /* 4. Try to fixup all. It is made immediately after connection enters
448  *    established state.
449  */
450 void tcp_init_buffer_space(struct sock *sk)
451 {
452         int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
453         struct tcp_sock *tp = tcp_sk(sk);
454         int maxwin;
455 
456         if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
457                 tcp_fixup_rcvbuf(sk);
458         if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
459                 tcp_sndbuf_expand(sk);
460 
461         tp->rcvq_space.space = tp->rcv_wnd;
462         tcp_mstamp_refresh(tp);
463         tp->rcvq_space.time = tp->tcp_mstamp;
464         tp->rcvq_space.seq = tp->copied_seq;
465 
466         maxwin = tcp_full_space(sk);
467 
468         if (tp->window_clamp >= maxwin) {
469                 tp->window_clamp = maxwin;
470 
471                 if (tcp_app_win && maxwin > 4 * tp->advmss)
472                         tp->window_clamp = max(maxwin -
473                                                (maxwin >> tcp_app_win),
474                                                4 * tp->advmss);
475         }
476 
477         /* Force reservation of one segment. */
478         if (tcp_app_win &&
479             tp->window_clamp > 2 * tp->advmss &&
480             tp->window_clamp + tp->advmss > maxwin)
481                 tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
482 
483         tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
484         tp->snd_cwnd_stamp = tcp_jiffies32;
485 }
486 
487 /* 5. Recalculate window clamp after socket hit its memory bounds. */
488 static void tcp_clamp_window(struct sock *sk)
489 {
490         struct tcp_sock *tp = tcp_sk(sk);
491         struct inet_connection_sock *icsk = inet_csk(sk);
492         struct net *net = sock_net(sk);
493 
494         icsk->icsk_ack.quick = 0;
495 
496         if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] &&
497             !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
498             !tcp_under_memory_pressure(sk) &&
499             sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
500                 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
501                                     net->ipv4.sysctl_tcp_rmem[2]);
502         }
503         if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
504                 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
505 }
506 
507 /* Initialize RCV_MSS value.
508  * RCV_MSS is our guess about the MSS used by the peer.
509  * We don't have any direct information about the MSS.
510  * It's better to underestimate the RCV_MSS rather than overestimate it.
511  * Overestimating it makes us ACK less frequently than needed.
512  * Underestimates are easier to detect and fix by tcp_measure_rcv_mss().
513  */
514 void tcp_initialize_rcv_mss(struct sock *sk)
515 {
516         const struct tcp_sock *tp = tcp_sk(sk);
517         unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
518 
519         hint = min(hint, tp->rcv_wnd / 2);
520         hint = min(hint, TCP_MSS_DEFAULT);
521         hint = max(hint, TCP_MIN_MSS);
522 
523         inet_csk(sk)->icsk_ack.rcv_mss = hint;
524 }
525 EXPORT_SYMBOL(tcp_initialize_rcv_mss);
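
As a worked example of the clamping above, assuming the kernel's TCP_MSS_DEFAULT of 536 and TCP_MIN_MSS of 88: with advmss = mss_cache = 1460 and a 64 KB receive window, the initial guess becomes min(1460, 32768, 536) = 536, a deliberate underestimate that tcp_measure_rcv_mss() raises once full-sized segments arrive. A minimal sketch of the same clamping:

static unsigned int initial_rcv_mss(unsigned int advmss,
                                    unsigned int mss_cache,
                                    unsigned int rcv_wnd)
{
        unsigned int hint = advmss < mss_cache ? advmss : mss_cache;

        if (hint > rcv_wnd / 2)
                hint = rcv_wnd / 2;
        if (hint > 536)                 /* TCP_MSS_DEFAULT */
                hint = 536;
        return hint < 88 ? 88 : hint;   /* TCP_MIN_MSS */
}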
526 
527 /* Receiver "autotuning" code.
528  *
529  * The algorithm for RTT estimation w/o timestamps is based on
530  * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
531  * <http://public.lanl.gov/radiant/pubs.html#DRS>
532  *
533  * More detail on this code can be found at
534  * <http://staff.psc.edu/jheffner/>,
535  * though this reference is out of date.  A new paper
536  * is pending.
537  */
538 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
539 {
540         u32 new_sample = tp->rcv_rtt_est.rtt_us;
541         long m = sample;
542 
543         if (new_sample != 0) {
544                 /* If we take larger samples in the non-timestamp
545                  * case, we could grossly overestimate the RTT, especially
546                  * with chatty applications or bulk transfer apps which
547                  * are stalled on filesystem I/O.
548                  *
549                  * Also, since we are only going for a minimum in the
550                  * non-timestamp case, we do not smooth things out
551                  * else with timestamps disabled convergence takes too
552                  * long.
553                  */
554                 if (!win_dep) {
555                         m -= (new_sample >> 3);
556                         new_sample += m;
557                 } else {
558                         m <<= 3;
559                         if (m < new_sample)
560                                 new_sample = m;
561                 }
562         } else {
563                 /* No previous measure. */
564                 new_sample = m << 3;
565         }
566 
567         tp->rcv_rtt_est.rtt_us = new_sample;
568 }
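
Stripped of the 3-bit fixed-point scaling, the update above reduces to a small sketch: with timestamps (win_dep == 0) the estimate is an EWMA with gain 1/8, and without timestamps (win_dep != 0) it is a running minimum, because window-based samples can only overestimate the RTT. Illustrative only, not the kernel code:

static long rcv_rtt_update(long est_us, long sample_us, int win_dep)
{
        if (!est_us)
                return sample_us;                         /* first measurement */
        if (!win_dep)
                return est_us + (sample_us - est_us) / 8; /* EWMA, gain 1/8 */
        return sample_us < est_us ? sample_us : est_us;   /* keep the minimum */
}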
569 
570 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
571 {
572         u32 delta_us;
573 
574         if (tp->rcv_rtt_est.time == 0)
575                 goto new_measure;
576         if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
577                 return;
578         delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
579         if (!delta_us)
580                 delta_us = 1;
581         tcp_rcv_rtt_update(tp, delta_us, 1);
582 
583 new_measure:
584         tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
585         tp->rcv_rtt_est.time = tp->tcp_mstamp;
586 }
587 
588 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
589                                           const struct sk_buff *skb)
590 {
591         struct tcp_sock *tp = tcp_sk(sk);
592 
593         if (tp->rx_opt.rcv_tsecr &&
594             (TCP_SKB_CB(skb)->end_seq -
595              TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
596                 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
597                 u32 delta_us;
598 
599                 if (!delta)
600                         delta = 1;
601                 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
602                 tcp_rcv_rtt_update(tp, delta_us, 0);
603         }
604 }
605 
606 /*
607  * This function should be called every time data is copied to user space.
608  * It calculates the appropriate TCP receive buffer space.
609  */
610 void tcp_rcv_space_adjust(struct sock *sk)
611 {
612         struct tcp_sock *tp = tcp_sk(sk);
613         u32 copied;
614         int time;
615 
616         trace_tcp_rcv_space_adjust(sk);
617 
618         tcp_mstamp_refresh(tp);
619         time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
620         if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
621                 return;
622 
623         /* Number of bytes copied to user in last RTT */
624         copied = tp->copied_seq - tp->rcvq_space.seq;
625         if (copied <= tp->rcvq_space.space)
626                 goto new_measure;
627 
628         /* A bit of theory :
629          * copied = bytes received in previous RTT, our base window
630          * To cope with packet losses, we need a 2x factor
631          * To cope with slow start, and the sender growing its cwnd by 100 %
632          * every RTT, we need a 4x factor, because the ACK we are sending
633          * now is for the next RTT, not the current one :
634          * <prev RTT . ><current RTT .. ><next RTT .... >
635          */
636 
637         if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
638             !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
639                 int rcvmem, rcvbuf;
640                 u64 rcvwin, grow;
641 
642                 /* minimal window to cope with packet losses, assuming
643                  * steady state. Add some cushion because of small variations.
644                  */
645                 rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
646 
647                 /* Accommodate for sender rate increase (eg. slow start) */
648                 grow = rcvwin * (copied - tp->rcvq_space.space);
649                 do_div(grow, tp->rcvq_space.space);
650                 rcvwin += (grow << 1);
651 
652                 rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
653                 while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
654                         rcvmem += 128;
655 
656                 do_div(rcvwin, tp->advmss);
657                 rcvbuf = min_t(u64, rcvwin * rcvmem,
658                                sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
659                 if (rcvbuf > sk->sk_rcvbuf) {
660                         sk->sk_rcvbuf = rcvbuf;
661 
662                         /* Make the window clamp follow along.  */
663                         tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
664                 }
665         }
666         tp->rcvq_space.space = copied;
667 
668 new_measure:
669         tp->rcvq_space.seq = tp->copied_seq;
670         tp->rcvq_space.time = tp->tcp_mstamp;
671 }
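
The sizing logic above, stripped of the socket plumbing, comes down to the following sketch (only reached when "copied" has grown past the previous, non-zero rcvq_space.space; rcvmem is the true memory cost of one advmss-sized skb and rmem_max stands in for sysctl_tcp_rmem[2]). Illustrative only:

#include <stdint.h>

static uint64_t drs_rcvbuf(uint64_t copied, uint64_t prev_space,
                           uint32_t advmss, uint32_t rcvmem, uint32_t rmem_max)
{
        /* base window: 2x what the application consumed, plus a cushion */
        uint64_t rcvwin = (copied << 1) + 16 * advmss;
        /* accommodate the sender doubling its rate (e.g. slow start) */
        uint64_t grow = rcvwin * (copied - prev_space) / prev_space;
        uint64_t rcvbuf;

        rcvwin += grow << 1;
        rcvbuf = rcvwin / advmss * rcvmem;      /* window -> socket memory */
        return rcvbuf < rmem_max ? rcvbuf : rmem_max;
}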
672 
673 /* There is something which you must keep in mind when you analyze the
674  * behavior of the tp->ato delayed ack timeout interval.  When a
675  * connection starts up, we want to ack as quickly as possible.  The
676  * problem is that "good" TCPs do slow start at the beginning of data
677  * transmission.  This means that until we send the first few ACKs the
678  * sender will sit on his end and only queue most of his data, because
679  * he can only send snd_cwnd unacked packets at any given time.  For
680  * each ACK we send, he increments snd_cwnd and transmits more of his
681  * queue.  -DaveM
682  */
683 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
684 {
685         struct tcp_sock *tp = tcp_sk(sk);
686         struct inet_connection_sock *icsk = inet_csk(sk);
687         u32 now;
688 
689         inet_csk_schedule_ack(sk);
690 
691         tcp_measure_rcv_mss(sk, skb);
692 
693         tcp_rcv_rtt_measure(tp);
694 
695         now = tcp_jiffies32;
696 
697         if (!icsk->icsk_ack.ato) {
698                 /* The _first_ data packet received, initialize
699                  * delayed ACK engine.
700                  */
701                 tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
702                 icsk->icsk_ack.ato = TCP_ATO_MIN;
703         } else {
704                 int m = now - icsk->icsk_ack.lrcvtime;
705 
706                 if (m <= TCP_ATO_MIN / 2) {
707                         /* The fastest case is the first. */
708                         icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
709                 } else if (m < icsk->icsk_ack.ato) {
710                         icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
711                         if (icsk->icsk_ack.ato > icsk->icsk_rto)
712                                 icsk->icsk_ack.ato = icsk->icsk_rto;
713                 } else if (m > icsk->icsk_rto) {
714                         /* Too long a gap. Apparently the sender failed to
715                          * restart the window, so send ACKs quickly.
716                          */
717                         tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
718                         sk_mem_reclaim(sk);
719                 }
720         }
721         icsk->icsk_ack.lrcvtime = now;
722 
723         tcp_ecn_check_ce(sk, skb);
724 
725         if (skb->len >= 128)
726                 tcp_grow_window(sk, skb);
727 }
728 
729 /* Called to compute a smoothed rtt estimate. The data fed to this
730  * routine either comes from timestamps, or from segments that were
731  * known _not_ to have been retransmitted [see Karn/Partridge
732  * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
733  * piece by Van Jacobson.
734  * NOTE: the next three routines used to be one big routine.
735  * To save cycles in the RFC 1323 implementation it was better to break
736  * it up into three procedures. -- erics
737  */
738 static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
739 {
740         struct tcp_sock *tp = tcp_sk(sk);
741         long m = mrtt_us; /* RTT */
742         u32 srtt = tp->srtt_us;
743 
744         /*      The following amusing code comes from Jacobson's
745          *      article in SIGCOMM '88.  Note that rtt and mdev
746          *      are scaled versions of rtt and mean deviation.
747          *      This is designed to be as fast as possible.
748          *      m stands for "measurement".
749          *
750          *      In a 1990 paper the rto value is changed to:
751          *      RTO = rtt + 4 * mdev
752          *
753          * Funny. This algorithm seems to be very broken.
754          * These formulae increase RTO when it should be decreased, increase it
755          * too slowly when it should be increased quickly, decrease it too quickly,
756          * etc. I guess in BSD RTO takes ONE value, so that it absolutely
757          * does not matter how to _calculate_ it. It seems to be a trap
758          * that VJ failed to avoid. 8)
759          */
760         if (srtt != 0) {
761                 m -= (srtt >> 3);       /* m is now error in rtt est */
762                 srtt += m;              /* rtt = 7/8 rtt + 1/8 new */
763                 if (m < 0) {
764                         m = -m;         /* m is now abs(error) */
765                         m -= (tp->mdev_us >> 2);   /* similar update on mdev */
766                         /* This is similar to one of Eifel findings.
767                          * Eifel blocks mdev updates when rtt decreases.
768                          * This solution is a bit different: we use finer gain
769                          * for mdev in this case (alpha*beta).
770                          * Like Eifel it also prevents growth of rto,
771                          * but also it limits too fast rto decreases,
772                          * happening in pure Eifel.
773                          */
774                         if (m > 0)
775                                 m >>= 3;
776                 } else {
777                         m -= (tp->mdev_us >> 2);   /* similar update on mdev */
778                 }
779                 tp->mdev_us += m;               /* mdev = 3/4 mdev + 1/4 new */
780                 if (tp->mdev_us > tp->mdev_max_us) {
781                         tp->mdev_max_us = tp->mdev_us;
782                         if (tp->mdev_max_us > tp->rttvar_us)
783                                 tp->rttvar_us = tp->mdev_max_us;
784                 }
785                 if (after(tp->snd_una, tp->rtt_seq)) {
786                         if (tp->mdev_max_us < tp->rttvar_us)
787                                 tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
788                         tp->rtt_seq = tp->snd_nxt;
789                         tp->mdev_max_us = tcp_rto_min_us(sk);
790                 }
791         } else {
792                 /* no previous measure. */
793                 srtt = m << 3;          /* take the measured time to be rtt */
794                 tp->mdev_us = m << 1;   /* make sure rto = 3*rtt */
795                 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
796                 tp->mdev_max_us = tp->rttvar_us;
797                 tp->rtt_seq = tp->snd_nxt;
798         }
799         tp->srtt_us = max(1U, srtt);
800 }
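
In unscaled form (the kernel keeps srtt and mdev left-shifted to stay in integer arithmetic), the estimator above is the classic Jacobson/Karels scheme: srtt moves 1/8 of the way toward each measurement, the deviation moves 1/4 of the way toward |error|, and the RTO is srtt + 4 * rttvar before clamping. A minimal floating-point sketch with RFC 6298 style initialisation, for illustration only:

#include <stdio.h>

struct rtt_est {
        double srtt;            /* smoothed RTT, microseconds */
        double rttvar;          /* smoothed mean deviation */
};

static double rtt_update(struct rtt_est *e, double m)
{
        if (e->srtt == 0) {                     /* first measurement */
                e->srtt = m;
                e->rttvar = m / 2;
        } else {
                double err = m - e->srtt;

                e->srtt += err / 8;
                e->rttvar += ((err < 0 ? -err : err) - e->rttvar) / 4;
        }
        return e->srtt + 4 * e->rttvar;         /* RTO before min/max clamping */
}

int main(void)
{
        struct rtt_est e = { 0, 0 };
        double samples[] = { 100000, 120000, 90000, 300000, 110000 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("m=%.0fus rto=%.0fus\n", samples[i], rtt_update(&e, samples[i]));
        return 0;
}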
801 
802 static void tcp_update_pacing_rate(struct sock *sk)
803 {
804         const struct tcp_sock *tp = tcp_sk(sk);
805         u64 rate;
806 
807         /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
808         rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);
809 
810         /* current rate is (cwnd * mss) / srtt
811          * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.
812          * In Congestion Avoidance phase, set it to 120 % the current rate.
813          *
814          * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
815          *       If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
816          *       end of slow start and should slow down.
817          */
818         if (tp->snd_cwnd < tp->snd_ssthresh / 2)
819                 rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
820         else
821                 rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;
822 
823         rate *= max(tp->snd_cwnd, tp->packets_out);
824 
825         if (likely(tp->srtt_us))
826                 do_div(rate, tp->srtt_us);
827 
828         /* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate
829          * without any lock. We want to make sure compiler wont store
830          * intermediate values in this location.
831          */
832         WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,
833                                              sk->sk_max_pacing_rate));
834 }
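
Ignoring the fixed-point scaling, the formula above is simply rate = ratio% of (mss * cwnd / srtt). A minimal sketch, assuming srtt_us is the unscaled smoothed RTT in microseconds and ratio is 200 during slow start or 120 in congestion avoidance:

#include <stdio.h>
#include <stdint.h>

static uint64_t pacing_rate(uint32_t mss, uint32_t cwnd,
                            uint32_t srtt_us, uint32_t ratio)
{
        uint64_t rate = (uint64_t)mss * cwnd * 1000000ULL;      /* bytes/sec */

        rate = rate * ratio / 100;
        if (srtt_us)
                rate /= srtt_us;
        return rate;
}

int main(void)
{
        /* 1460-byte MSS, cwnd of 10, 50 ms smoothed RTT, slow start */
        printf("%llu bytes/sec\n",
               (unsigned long long)pacing_rate(1460, 10, 50000, 200));
        return 0;
}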
835 
836 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
837  * routine referred to above.
838  */
839 static void tcp_set_rto(struct sock *sk)
840 {
841         const struct tcp_sock *tp = tcp_sk(sk);
842         /* Old crap is replaced with new one. 8)
843          *
844          * More seriously:
845          * 1. If the rtt variance happened to be less than 50 msec, it is a hallucination.
846          *    It cannot be less, due to utterly erratic ACK generation made
847          *    at least by solaris and freebsd. "Erratic ACKs" have _nothing_
848          *    to do with delayed acks, because at cwnd>2 true delack timeout
849          *    is invisible. Actually, Linux-2.4 also generates erratic
850          *    ACKs in some circumstances.
851          */
852         inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
853 
854         /* 2. Fixups made earlier cannot be right.
855          *    If we do not estimate RTO correctly without them,
856          *    all the algo is pure shit and should be replaced
857          *    with a correct one. That is exactly what we pretend to do.
858          */
859 
860         /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
861          * guarantees that rto is higher.
862          */
863         tcp_bound_rto(sk);
864 }
865 
866 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
867 {
868         __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
869 
870         if (!cwnd)
871                 cwnd = TCP_INIT_CWND;
872         return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
873 }
874 
875 /* Take note that the peer is sending D-SACKs */
876 static void tcp_dsack_seen(struct tcp_sock *tp)
877 {
878         tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
879         tp->rack.dsack_seen = 1;
880 }
881 
882 /* It's reordering when a higher sequence was delivered (i.e. sacked) before
883  * some lower never-retransmitted sequence ("low_seq"). The maximum reordering
884  * distance is approximated in full-MSS packets ("reordering").
885  */
886 static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
887                                       const int ts)
888 {
889         struct tcp_sock *tp = tcp_sk(sk);
890         const u32 mss = tp->mss_cache;
891         u32 fack, metric;
892 
893         fack = tcp_highest_sack_seq(tp);
894         if (!before(low_seq, fack))
895                 return;
896 
897         metric = fack - low_seq;
898         if ((metric > tp->reordering * mss) && mss) {
899 #if FASTRETRANS_DEBUG > 1
900                 pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
901                          tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
902                          tp->reordering,
903                          0,
904                          tp->sacked_out,
905                          tp->undo_marker ? tp->undo_retrans : 0);
906 #endif
907                 tp->reordering = min_t(u32, (metric + mss - 1) / mss,
908                                        sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
909         }
910 
911         tp->rack.reord = 1;
912         /* This exciting event is worth remembering. 8) */
913         NET_INC_STATS(sock_net(sk),
914                       ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
915 }
916 
917 /* This must be called before lost_out is incremented */
918 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
919 {
920         if (!tp->retransmit_skb_hint ||
921             before(TCP_SKB_CB(skb)->seq,
922                    TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
923                 tp->retransmit_skb_hint = skb;
924 }
925 
926 /* Sum the number of packets on the wire we have marked as lost.
927  * There are two cases we care about here:
928  * a) Packet hasn't been marked lost (nor retransmitted),
929  *    and this is the first loss.
930  * b) Packet has been marked both lost and retransmitted,
931  *    and this means we think it was lost again.
932  */
933 static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb)
934 {
935         __u8 sacked = TCP_SKB_CB(skb)->sacked;
936 
937         if (!(sacked & TCPCB_LOST) ||
938             ((sacked & TCPCB_LOST) && (sacked & TCPCB_SACKED_RETRANS)))
939                 tp->lost += tcp_skb_pcount(skb);
940 }
941 
942 static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
943 {
944         if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
945                 tcp_verify_retransmit_hint(tp, skb);
946 
947                 tp->lost_out += tcp_skb_pcount(skb);
948                 tcp_sum_lost(tp, skb);
949                 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
950         }
951 }
952 
953 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
954 {
955         tcp_verify_retransmit_hint(tp, skb);
956 
957         tcp_sum_lost(tp, skb);
958         if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
959                 tp->lost_out += tcp_skb_pcount(skb);
960                 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
961         }
962 }
963 
964 /* This procedure tags the retransmission queue when SACKs arrive.
965  *
966  * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
967  * Packets in queue with these bits set are counted in variables
968  * sacked_out, retrans_out and lost_out, correspondingly.
969  *
970  * Valid combinations are:
971  * Tag  InFlight        Description
972  * 0    1               - orig segment is in flight.
973  * S    0               - nothing flies, orig reached receiver.
974  * L    0               - nothing flies, orig lost by net.
975  * R    2               - both orig and retransmit are in flight.
976  * L|R  1               - orig is lost, retransmit is in flight.
977  * S|R  1               - orig reached receiver, retrans is still in flight.
978  * (L|S|R is logically valid, it could occur when L|R is sacked,
979  *  but it is equivalent to plain S and code short-curcuits it to S.
980  *  but it is equivalent to plain S and the code short-circuits it to S.
981  *
982  * These 6 states form a finite state machine, controlled by the following events:
983  * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
984  * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
985  * 3. Loss detection event of two flavors:
986  *      A. Scoreboard estimator decided the packet is lost.
987  *         A'. Reno "three dupacks" marks head of queue lost.
988  *      B. SACK arrives sacking SND.NXT at the moment, when the
989  *         segment was retransmitted.
990  * 4. D-SACK added new rule: D-SACK changes any tag to S.
991  *
992  * It is pleasant to note that the state diagram turns out to be commutative,
993  * so that we are allowed not to be bothered by the order of our actions
994  * when multiple events arrive simultaneously (see the function below).
995  *
996  * Reordering detection.
997  * --------------------
998  * The reordering metric is the maximal distance by which a packet can be
999  * displaced in the packet stream. With SACKs we can estimate it:
1000  *
1001  * 1. SACK fills old hole and the corresponding segment was not
1002  *    ever retransmitted -> reordering. Alas, we cannot use it
1003  *    when segment was retransmitted.
1004  * 2. The last flaw is solved with D-SACK. D-SACK arrives
1005  *    for retransmitted and already SACKed segment -> reordering..
1006  * Neither of these heuristics is used in the Loss state, where we cannot
1007  * account for retransmits accurately.
1008  *
1009  * SACK block validation.
1010  * ----------------------
1011  *
1012  * SACK block range validation checks that the received SACK block fits to
1013  * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
1014  * Note that SND.UNA is not included in the range even though it is valid,
1015  * because it would mean that the receiver is rather inconsistent with itself,
1016  * reporting SACK reneging when it should advance SND.UNA. Such a SACK block
1017  * is perfectly valid, however, in light of RFC2018 which explicitly states
1018  * that "SACK block MUST reflect the newest segment.  Even if the newest
1019  * segment is going to be discarded ...", not that it looks very clever
1020  * in case of the head skb. Due to potential receiver-driven attacks, we
1021  * choose to avoid immediate execution of a walk in the write queue due to
1022  * reneging and defer the head skb's loss recovery to the standard loss
1023  * recovery procedure that will eventually trigger (nothing forbids us doing this).
1024  *
1025  * Also implements blockage of start_seq wrap-around. The problem lies in the
1026  * fact that though start_seq (s) is before end_seq (i.e., not reversed),
1027  * there's no guarantee that it will be before snd_nxt (n). The problem
1028  * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
1029  * wrap (s_w):
1030  *
1031  *         <- outs wnd ->                          <- wrapzone ->
1032  *         u     e      n                         u_w   e_w  s n_w
1033  *         |     |      |                          |     |   |  |
1034  * |<------------+------+----- TCP seqno space --------------+---------->|
1035  * ...-- <2^31 ->|                                           |<--------...
1036  * ...---- >2^31 ------>|                                    |<--------...
1037  *
1038  * Current code wouldn't be vulnerable but it's better still to discard such
1039  * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
1040  * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
1041  * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
1042  * equal to the ideal case (infinite seqno space without wrap caused issues).
1043  *
1044  * With D-SACK the lower bound is extended to cover sequence space below
1045  * SND.UNA down to undo_marker, which is the last point of interest. Yet
1046  * again, the D-SACK block must not go across snd_una (for the same reason as
1047  * for the normal SACK blocks, explained above). But there all simplicity
1048  * ends: TCP might receive valid D-SACKs below that. As long as they reside
1049  * fully below undo_marker they do not affect behavior in any way and can
1050  * therefore be safely ignored. In rare cases (which are more or less
1051  * theoretical ones), the D-SACK will nicely cross that boundary due to skb
1052  * fragmentation and packet reordering past skb's retransmission. To consider
1053  * them correctly, the acceptable range must be extended even more though
1054  * the exact amount is rather hard to quantify. However, tp->max_window can
1055  * be used as an exaggerated estimate.
1056  */
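
All of these checks lean on the wrap-safe 32-bit sequence comparisons used throughout this file. A minimal sketch of the idea behind before()/after() (a signed difference, which stays correct across the 2^32 wrap as long as the two values are within 2^31 of each other), for illustration:

#include <stdint.h>
#include <stdio.h>

static int seq_before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
        uint32_t snd_una = 0xfffffff0u;         /* just before the wrap */
        uint32_t end_seq = 0x00000010u;         /* just after the wrap  */

        printf("%d\n", seq_before(snd_una, end_seq));   /* 1: una is "before" */
        printf("%d\n", seq_before(end_seq, snd_una));   /* 0 */
        return 0;
}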
1057 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
1058                                    u32 start_seq, u32 end_seq)
1059 {
1060         /* Too far in future, or reversed (interpretation is ambiguous) */
1061         if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
1062                 return false;
1063 
1064         /* Nasty start_seq wrap-around check (see comments above) */
1065         if (!before(start_seq, tp->snd_nxt))
1066                 return false;
1067 
1068         /* In outstanding window? ...This is valid exit for D-SACKs too.
1069          * start_seq == snd_una is non-sensical (see comments above)
1070          */
1071         if (after(start_seq, tp->snd_una))
1072                 return true;
1073 
1074         if (!is_dsack || !tp->undo_marker)
1075                 return false;
1076 
1077         /* ...Then it's D-SACK, and must reside below snd_una completely */
1078         if (after(end_seq, tp->snd_una))
1079                 return false;
1080 
1081         if (!before(start_seq, tp->undo_marker))
1082                 return true;
1083 
1084         /* Too old */
1085         if (!after(end_seq, tp->undo_marker))
1086                 return false;
1087 
1088         /* Undo_marker boundary crossing (overestimates a lot). Known already:
1089          *   start_seq < undo_marker and end_seq >= undo_marker.
1090          */
1091         return !before(start_seq, end_seq - tp->max_window);
1092 }
1093 
1094 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1095                             struct tcp_sack_block_wire *sp, int num_sacks,
1096                             u32 prior_snd_una)
1097 {
1098         struct tcp_sock *tp = tcp_sk(sk);
1099         u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
1100         u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
1101         bool dup_sack = false;
1102 
1103         if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
1104                 dup_sack = true;
1105                 tcp_dsack_seen(tp);
1106                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
1107         } else if (num_sacks > 1) {
1108                 u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
1109                 u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
1110 
1111                 if (!after(end_seq_0, end_seq_1) &&
1112                     !before(start_seq_0, start_seq_1)) {
1113                         dup_sack = true;
1114                         tcp_dsack_seen(tp);
1115                         NET_INC_STATS(sock_net(sk),
1116                                         LINUX_MIB_TCPDSACKOFORECV);
1117                 }
1118         }
1119 
1120         /* D-SACK for already forgotten data... Do dumb counting. */
1121         if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
1122             !after(end_seq_0, prior_snd_una) &&
1123             after(end_seq_0, tp->undo_marker))
1124                 tp->undo_retrans--;
1125 
1126         return dup_sack;
1127 }
1128 
1129 struct tcp_sacktag_state {
1130         u32     reord;
1131         /* Timestamps for earliest and latest never-retransmitted segment
1132          * that was SACKed. RTO needs the earliest RTT to stay conservative,
1133          * but congestion control should still get an accurate delay signal.
1134          */
1135         u64     first_sackt;
1136         u64     last_sackt;
1137         struct rate_sample *rate;
1138         int     flag;
1139         unsigned int mss_now;
1140 };
1141 
1142 /* Check if the skb is fully within the SACK block. In the presence of GSO skbs,
1143  * the incoming SACK may not match exactly, but we can find a smaller
1144  * MSS-aligned portion of it that matches. Therefore we might need to fragment,
1145  * which may fail and create some hassle (the caller must handle error
1146  * returns).
1147  *
1148  * FIXME: this could be merged to shift decision code
1149  */
1150 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1151                                   u32 start_seq, u32 end_seq)
1152 {
1153         int err;
1154         bool in_sack;
1155         unsigned int pkt_len;
1156         unsigned int mss;
1157 
1158         in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1159                   !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1160 
1161         if (tcp_skb_pcount(skb) > 1 && !in_sack &&
1162             after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
1163                 mss = tcp_skb_mss(skb);
1164                 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1165 
1166                 if (!in_sack) {
1167                         pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
1168                         if (pkt_len < mss)
1169                                 pkt_len = mss;
1170                 } else {
1171                         pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
1172                         if (pkt_len < mss)
1173                                 return -EINVAL;
1174                 }
1175 
1176                 /* Round if necessary so that SACKs cover only full MSSes
1177                  * and/or the remaining small portion (if present)
1178                  */
1179                 if (pkt_len > mss) {
1180                         unsigned int new_len = (pkt_len / mss) * mss;
1181                         if (!in_sack && new_len < pkt_len)
1182                                 new_len += mss;
1183                         pkt_len = new_len;
1184                 }
1185 
1186                 if (pkt_len >= skb->len && !in_sack)
1187                         return 0;
1188 
1189                 err = tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
1190                                    pkt_len, mss, GFP_ATOMIC);
1191                 if (err < 0)
1192                         return err;
1193         }
1194 
1195         return in_sack;
1196 }
1197 
1198 /* Mark the given newly-SACKed range as such, adjusting counters and hints. */
1199 static u8 tcp_sacktag_one(struct sock *sk,
1200                           struct tcp_sacktag_state *state, u8 sacked,
1201                           u32 start_seq, u32 end_seq,
1202                           int dup_sack, int pcount,
1203                           u64 xmit_time)
1204 {
1205         struct tcp_sock *tp = tcp_sk(sk);
1206 
1207         /* Account D-SACK for retransmitted packet. */
1208         if (dup_sack && (sacked & TCPCB_RETRANS)) {
1209                 if (tp->undo_marker && tp->undo_retrans > 0 &&
1210                     after(end_seq, tp->undo_marker))
1211                         tp->undo_retrans--;
1212                 if ((sacked & TCPCB_SACKED_ACKED) &&
1213                     before(start_seq, state->reord))
1214                                 state->reord = start_seq;
1215         }
1216 
1217         /* Nothing to do; acked frame is about to be dropped (was ACKed). */
1218         if (!after(end_seq, tp->snd_una))
1219                 return sacked;
1220 
1221         if (!(sacked & TCPCB_SACKED_ACKED)) {
1222                 tcp_rack_advance(tp, sacked, end_seq, xmit_time);
1223 
1224                 if (sacked & TCPCB_SACKED_RETRANS) {
1225                         /* If the segment is not tagged as lost,
1226                          * we do not clear RETRANS, believing
1227                          * that retransmission is still in flight.
1228                          */
1229                         if (sacked & TCPCB_LOST) {
1230                                 sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
1231                                 tp->lost_out -= pcount;
1232                                 tp->retrans_out -= pcount;
1233                         }
1234                 } else {
1235                         if (!(sacked & TCPCB_RETRANS)) {
1236                                 /* New SACK for a never-retransmitted frame
1237                                  * that sat in a hole: evidence of reordering.
1238                                  */
1239                                 if (before(start_seq,
1240                                            tcp_highest_sack_seq(tp)) &&
1241                                     before(start_seq, state->reord))
1242                                         state->reord = start_seq;
1243 
1244                                 if (!after(end_seq, tp->high_seq))
1245                                         state->flag |= FLAG_ORIG_SACK_ACKED;
1246                                 if (state->first_sackt == 0)
1247                                         state->first_sackt = xmit_time;
1248                                 state->last_sackt = xmit_time;
1249                         }
1250 
1251                         if (sacked & TCPCB_LOST) {
1252                                 sacked &= ~TCPCB_LOST;
1253                                 tp->lost_out -= pcount;
1254                         }
1255                 }
1256 
1257                 sacked |= TCPCB_SACKED_ACKED;
1258                 state->flag |= FLAG_DATA_SACKED;
1259                 tp->sacked_out += pcount;
1260                 tp->delivered += pcount;  /* Out-of-order packets delivered */
1261 
1262                 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
1263                 if (tp->lost_skb_hint &&
1264                     before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
1265                         tp->lost_cnt_hint += pcount;
1266         }
1267 
1268         /* D-SACK. We can detect redundant retransmission in S|R and plain R
1269          * frames and clear it. undo_retrans is decreased above, L|R frames
1270          * are accounted above as well.
1271          */
1272         if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
1273                 sacked &= ~TCPCB_SACKED_RETRANS;
1274                 tp->retrans_out -= pcount;
1275         }
1276 
1277         return sacked;
1278 }
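
     /* Illustrative tag transitions (informal summary): a segment tagged
      * L|R that is newly SACKed drops both bits (lost_out and retrans_out
      * shrink by pcount) before gaining S; a segment tagged only R keeps
      * R, since its retransmission may still be in flight; an untagged,
      * never-retransmitted segment below the highest SACK may lower
      * state->reord, i.e. it is taken as evidence of reordering.
      */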
1279 
1280 /* Shift newly-SACKed bytes from this skb to the immediately previous
1281  * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
1282  */
1283 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
1284                             struct sk_buff *skb,
1285                             struct tcp_sacktag_state *state,
1286                             unsigned int pcount, int shifted, int mss,
1287                             bool dup_sack)
1288 {
1289         struct tcp_sock *tp = tcp_sk(sk);
1290         u32 start_seq = TCP_SKB_CB(skb)->seq;   /* start of newly-SACKed */
1291         u32 end_seq = start_seq + shifted;      /* end of newly-SACKed */
1292 
1293         BUG_ON(!pcount);
1294 
1295         /* Adjust counters and hints for the newly sacked sequence
1296          * range but discard the return value since prev is already
1297          * marked. We must tag the range first because the seq
1298          * advancement below implicitly advances
1299          * tcp_highest_sack_seq() when skb is highest_sack.
1300          */
1301         tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
1302                         start_seq, end_seq, dup_sack, pcount,
1303                         skb->skb_mstamp);
1304         tcp_rate_skb_delivered(sk, skb, state->rate);
1305 
1306         if (skb == tp->lost_skb_hint)
1307                 tp->lost_cnt_hint += pcount;
1308 
1309         TCP_SKB_CB(prev)->end_seq += shifted;
1310         TCP_SKB_CB(skb)->seq += shifted;
1311 
1312         tcp_skb_pcount_add(prev, pcount);
1313         BUG_ON(tcp_skb_pcount(skb) < pcount);
1314         tcp_skb_pcount_add(skb, -pcount);
1315 
1316         /* When we're adding to gso_segs == 1, gso_size will be zero.
1317          * In theory this shouldn't be necessary, but as long as DSACK
1318          * code can still touch this skb later on, it's better to keep
1319          * gso_size set to something.
1320          */
1321         if (!TCP_SKB_CB(prev)->tcp_gso_size)
1322                 TCP_SKB_CB(prev)->tcp_gso_size = mss;
1323 
1324         /* CHECKME: To clear or not to clear? Mimics normal skb currently */
1325         if (tcp_skb_pcount(skb) <= 1)
1326                 TCP_SKB_CB(skb)->tcp_gso_size = 0;
1327 
1328         /* Difference in this won't matter, both ACKed by the same cumul. ACK */
1329         TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
1330 
1331         if (skb->len > 0) {
1332                 BUG_ON(!tcp_skb_pcount(skb));
1333                 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
1334                 return false;
1335         }
1336 
1337         /* Whole SKB was eaten :-) */
1338 
1339         if (skb == tp->retransmit_skb_hint)
1340                 tp->retransmit_skb_hint = prev;
1341         if (skb == tp->lost_skb_hint) {
1342                 tp->lost_skb_hint = prev;
1343                 tp->lost_cnt_hint -= tcp_skb_pcount(prev);
1344         }
1345 
1346         TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1347         TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
1348         if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1349                 TCP_SKB_CB(prev)->end_seq++;
1350 
1351         if (skb == tcp_highest_sack(sk))
1352                 tcp_advance_highest_sack(sk, skb);
1353 
1354         tcp_skb_collapse_tstamp(prev, skb);
1355         if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
1356                 TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;
1357 
1358         tcp_rtx_queue_unlink_and_free(skb, sk);
1359 
1360         NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
1361 
1362         return true;
1363 }
1364 
1365 /* I wish gso_size would have a bit more sane initialization than
1366  * something-or-zero which complicates things
1367  */
1368 static int tcp_skb_seglen(const struct sk_buff *skb)
1369 {
1370         return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
1371 }
1372 
1373 /* Shifting pages past head area doesn't work */
1374 static int skb_can_shift(const struct sk_buff *skb)
1375 {
1376         return !skb_headlen(skb) && skb_is_nonlinear(skb);
1377 }
1378 
1379 /* Try collapsing SACK blocks spanning across multiple skbs to a single
1380  * skb.
1381  */
1382 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1383                                           struct tcp_sacktag_state *state,
1384                                           u32 start_seq, u32 end_seq,
1385                                           bool dup_sack)
1386 {
1387         struct tcp_sock *tp = tcp_sk(sk);
1388         struct sk_buff *prev;
1389         int mss;
1390         int pcount = 0;
1391         int len;
1392         int in_sack;
1393 
1394         /* Normally R but no L won't result in plain S */
1395         if (!dup_sack &&
1396             (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
1397                 goto fallback;
1398         if (!skb_can_shift(skb))
1399                 goto fallback;
1400         /* This frame is about to be dropped (was ACKed). */
1401         if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1402                 goto fallback;
1403 
1404         /* Can only happen with delayed DSACK + discard craziness */
1405         prev = skb_rb_prev(skb);
1406         if (!prev)
1407                 goto fallback;
1408 
1409         if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
1410                 goto fallback;
1411 
1412         if (!tcp_skb_can_collapse_to(prev))
1413                 goto fallback;
1414 
1415         in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1416                   !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1417 
1418         if (in_sack) {
1419                 len = skb->len;
1420                 pcount = tcp_skb_pcount(skb);
1421                 mss = tcp_skb_seglen(skb);
1422 
1423                 /* TODO: Fix DSACKs to not fragment already SACKed and we can
1424                  * drop this restriction as unnecessary
1425                  */
1426                 if (mss != tcp_skb_seglen(prev))
1427                         goto fallback;
1428         } else {
1429                 if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
1430                         goto noop;
1431                 /* CHECKME: Is this the non-MSS split case only? This will
1432                  * cause skipped skbs due to the advancing loop, btw; the
1433                  * original has that behaviour too.
1434                  */
1435                 if (tcp_skb_pcount(skb) <= 1)
1436                         goto noop;
1437 
1438                 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1439                 if (!in_sack) {
1440                         /* TODO: head merge into the next skb could be tried
1441                          * here if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
1442                          * though it might not be worth the additional hassle.
1443                          *
1444                          * ...we can probably just fall back to what was done
1445                          * previously. We could try merging non-SACKed ones
1446                          * as well, but that probably isn't going to pay off
1447                          * because later SACKs might split them again, and it
1448                          * would make skb timestamp tracking a considerably
1449                          * harder problem.
1450                          */
1451                         goto fallback;
1452                 }
1453 
1454                 len = end_seq - TCP_SKB_CB(skb)->seq;
1455                 BUG_ON(len < 0);
1456                 BUG_ON(len > skb->len);
1457 
1458                 /* MSS boundaries should be honoured or else pcount will
1459                  * severely break, even though it makes things a bit trickier.
1460                  * Optimize the common case to avoid most of the divides.
1461                  */
1462                 mss = tcp_skb_mss(skb);
1463 
1464                 /* TODO: Fix DSACKs to not fragment already SACKed and we can
1465                  * drop this restriction as unnecessary
1466                  */
1467                 if (mss != tcp_skb_seglen(prev))
1468                         goto fallback;
1469 
1470                 if (len == mss) {
1471                         pcount = 1;
1472                 } else if (len < mss) {
1473                         goto noop;
1474                 } else {
1475                         pcount = len / mss;
1476                         len = pcount * mss;
1477                 }
1478         }
1479 
1480         /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
1481         if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
1482                 goto fallback;
1483 
1484         if (!skb_shift(prev, skb, len))
1485                 goto fallback;
1486         if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
1487                 goto out;
1488 
1489         /* Filling the hole allows collapsing with the next skb as well; this
1490          * is very useful when a hole-on-every-nth-skb pattern occurs.
1491          */
1492         skb = skb_rb_next(prev);
1493         if (!skb)
1494                 goto out;
1495 
1496         if (!skb_can_shift(skb) ||
1497             ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
1498             (mss != tcp_skb_seglen(skb)))
1499                 goto out;
1500 
1501         len = skb->len;
1502         if (skb_shift(prev, skb, len)) {
1503                 pcount += tcp_skb_pcount(skb);
1504                 tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb),
1505                                 len, mss, 0);
1506         }
1507 
1508 out:
1509         return prev;
1510 
1511 noop:
1512         return skb;
1513 
1514 fallback:
1515         NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
1516         return NULL;
1517 }
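
     /* Worked example (numbers invented): prev is already SACKed and ends
      * at 3000, skb covers 3000..6000 with mss = 1000, and the SACK block
      * is [1000, 5000).  Only the first 2000 bytes of skb are newly
      * SACKed, so len = 2000 and pcount = 2; skb_shift() moves those
      * bytes into prev and tcp_shifted_skb() advances skb->seq to 5000,
      * leaving a 1000-byte remainder for later SACKs to pick up.
      */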
1518 
1519 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1520                                         struct tcp_sack_block *next_dup,
1521                                         struct tcp_sacktag_state *state,
1522                                         u32 start_seq, u32 end_seq,
1523                                         bool dup_sack_in)
1524 {
1525         struct tcp_sock *tp = tcp_sk(sk);
1526         struct sk_buff *tmp;
1527 
1528         skb_rbtree_walk_from(skb) {
1529                 int in_sack = 0;
1530                 bool dup_sack = dup_sack_in;
1531 
1532                 /* queue is in-order => we can short-circuit the walk early */
1533                 if (!before(TCP_SKB_CB(skb)->seq, end_seq))
1534                         break;
1535 
1536                 if (next_dup &&
1537                     before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
1538                         in_sack = tcp_match_skb_to_sack(sk, skb,
1539                                                         next_dup->start_seq,
1540                                                         next_dup->end_seq);
1541                         if (in_sack > 0)
1542                                 dup_sack = true;
1543                 }
1544 
1545                 /* The skb reference here is a bit tricky to get right, since
1546                  * shifting can eat and free both this skb and the next,
1547                  * so not even the _safe variant of the loop is enough.
1548                  */
1549                 if (in_sack <= 0) {
1550                         tmp = tcp_shift_skb_data(sk, skb, state,
1551                                                  start_seq, end_seq, dup_sack);
1552                         if (tmp) {
1553                                 if (tmp != skb) {
1554                                         skb = tmp;
1555                                         continue;
1556                                 }
1557 
1558                                 in_sack = 0;
1559                         } else {
1560                                 in_sack = tcp_match_skb_to_sack(sk, skb,
1561                                                                 start_seq,
1562                                                                 end_seq);
1563                         }
1564                 }
1565 
1566                 if (unlikely(in_sack < 0))
1567                         break;
1568 
1569                 if (in_sack) {
1570                         TCP_SKB_CB(skb)->sacked =
1571                                 tcp_sacktag_one(sk,
1572                                                 state,
1573                                                 TCP_SKB_CB(skb)->sacked,
1574                                                 TCP_SKB_CB(skb)->seq,
1575                                                 TCP_SKB_CB(skb)->end_seq,
1576                                                 dup_sack,
1577                                                 tcp_skb_pcount(skb),
1578                                                 skb->skb_mstamp);
1579                         tcp_rate_skb_delivered(sk, skb, state->rate);
1580                         if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1581                                 list_del_init(&skb->tcp_tsorted_anchor);
1582 
1583                         if (!before(TCP_SKB_CB(skb)->seq,
1584                                     tcp_highest_sack_seq(tp)))
1585                                 tcp_advance_highest_sack(sk, skb);
1586                 }
1587         }
1588         return skb;
1589 }
1590 
1591 static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk,
1592                                            struct tcp_sacktag_state *state,
1593                                            u32 seq)
1594 {
1595         struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node;
1596         struct sk_buff *skb;
1597 
1598         while (*p) {
1599                 parent = *p;
1600                 skb = rb_to_skb(parent);
1601                 if (before(seq, TCP_SKB_CB(skb)->seq)) {
1602                         p = &parent->rb_left;
1603                         continue;
1604                 }
1605                 if (!before(seq, TCP_SKB_CB(skb)->end_seq)) {
1606                         p = &parent->rb_right;
1607                         continue;
1608                 }
1609                 return skb;
1610         }
1611         return NULL;
1612 }
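
     /* Example: with rtx-queue skbs covering [1000,2000), [2000,3000) and
      * [3000,4000), a lookup for seq 2500 returns the middle skb, while a
      * lookup for seq 4500 walks off the right edge and returns NULL.
      */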
1613 
1614 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
1615                                         struct tcp_sacktag_state *state,
1616                                         u32 skip_to_seq)
1617 {
1618         if (skb && after(TCP_SKB_CB(skb)->seq, skip_to_seq))
1619                 return skb;
1620 
1621         return tcp_sacktag_bsearch(sk, state, skip_to_seq);
1622 }
1623 
1624 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
1625                                                 struct sock *sk,
1626                                                 struct tcp_sack_block *next_dup,
1627                                                 struct tcp_sacktag_state *state,
1628                                                 u32 skip_to_seq)
1629 {
1630         if (!next_dup)
1631                 return skb;
1632 
1633         if (before(next_dup->start_seq, skip_to_seq)) {
1634                 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
1635                 skb = tcp_sacktag_walk(skb, sk, NULL, state,
1636                                        next_dup->start_seq, next_dup->end_seq,
1637                                        1);
1638         }
1639 
1640         return skb;
1641 }
1642 
1643 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
1644 {
1645         return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
1646 }
1647 
1648 static int
1649 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
1650                         u32 prior_snd_una, struct tcp_sacktag_state *state)
1651 {
1652         struct tcp_sock *tp = tcp_sk(sk);
1653         const unsigned char *ptr = (skb_transport_header(ack_skb) +
1654                                     TCP_SKB_CB(ack_skb)->sacked);
1655         struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
1656         struct tcp_sack_block sp[TCP_NUM_SACKS];
1657         struct tcp_sack_block *cache;
1658         struct sk_buff *skb;
1659         int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
1660         int used_sacks;
1661         bool found_dup_sack = false;
1662         int i, j;
1663         int first_sack_index;
1664 
1665         state->flag = 0;
1666         state->reord = tp->snd_nxt;
1667 
1668         if (!tp->sacked_out)
1669                 tcp_highest_sack_reset(sk);
1670 
1671         found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
1672                                          num_sacks, prior_snd_una);
1673         if (found_dup_sack) {
1674                 state->flag |= FLAG_DSACKING_ACK;
1675                 tp->delivered++; /* A spurious retransmission is delivered */
1676         }
1677 
1678         /* Eliminate too-old ACKs, but take into
1679          * account more or less fresh ones; they can
1680          * contain valid SACK info.
1681          */
1682         if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
1683                 return 0;
1684 
1685         if (!tp->packets_out)
1686                 goto out;
1687 
1688         used_sacks = 0;
1689         first_sack_index = 0;
1690         for (i = 0; i < num_sacks; i++) {
1691                 bool dup_sack = !i && found_dup_sack;
1692 
1693                 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
1694                 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
1695 
1696                 if (!tcp_is_sackblock_valid(tp, dup_sack,
1697                                             sp[used_sacks].start_seq,
1698                                             sp[used_sacks].end_seq)) {
1699                         int mib_idx;
1700 
1701                         if (dup_sack) {
1702                                 if (!tp->undo_marker)
1703                                         mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
1704                                 else
1705                                         mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
1706                         } else {
1707                                 /* Don't count old SACK blocks caused by ACK reordering */
1708                                 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
1709                                     !after(sp[used_sacks].end_seq, tp->snd_una))
1710                                         continue;
1711                                 mib_idx = LINUX_MIB_TCPSACKDISCARD;
1712                         }
1713 
1714                         NET_INC_STATS(sock_net(sk), mib_idx);
1715                         if (i == 0)
1716                                 first_sack_index = -1;
1717                         continue;
1718                 }
1719 
1720                 /* Ignore very old stuff early */
1721                 if (!after(sp[used_sacks].end_seq, prior_snd_una))
1722                         continue;
1723 
1724                 used_sacks++;
1725         }
1726 
1727         /* Order SACK blocks to allow an in-order walk of the retrans queue */
1728         for (i = used_sacks - 1; i > 0; i--) {
1729                 for (j = 0; j < i; j++) {
1730                         if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
1731                                 swap(sp[j], sp[j + 1]);
1732 
1733                                 /* Track where the first SACK block goes to */
1734                                 if (j == first_sack_index)
1735                                         first_sack_index = j + 1;
1736                         }
1737                 }
1738         }
1739 
1740         state->mss_now = tcp_current_mss(sk);
1741         skb = NULL;
1742         i = 0;
1743 
1744         if (!tp->sacked_out) {
1745                 /* It's already past, so skip checking against it */
1746                 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
1747         } else {
1748                 cache = tp->recv_sack_cache;
1749                 /* Skip empty blocks at the head of the cache */
1750                 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
1751                        !cache->end_seq)
1752                         cache++;
1753         }
1754 
1755         while (i < used_sacks) {
1756                 u32 start_seq = sp[i].start_seq;
1757                 u32 end_seq = sp[i].end_seq;
1758                 bool dup_sack = (found_dup_sack && (i == first_sack_index));
1759                 struct tcp_sack_block *next_dup = NULL;
1760 
1761                 if (found_dup_sack && ((i + 1) == first_sack_index))
1762                         next_dup = &sp[i + 1];
1763 
1764                 /* Skip too early cached blocks */
1765                 while (tcp_sack_cache_ok(tp, cache) &&
1766                        !before(start_seq, cache->end_seq))
1767                         cache++;
1768 
1769                 /* Can we skip some work by looking at recv_sack_cache? */
1770                 if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
1771                     after(end_seq, cache->start_seq)) {
1772 
1773                         /* Head todo? */
1774                         if (before(start_seq, cache->start_seq)) {
1775                                 skb = tcp_sacktag_skip(skb, sk, state,
1776                                                        start_seq);
1777                                 skb = tcp_sacktag_walk(skb, sk, next_dup,
1778                                                        state,
1779                                                        start_seq,
1780                                                        cache->start_seq,
1781                                                        dup_sack);
1782                         }
1783 
1784                         /* Rest of the block already fully processed? */
1785                         if (!after(end_seq, cache->end_seq))
1786                                 goto advance_sp;
1787 
1788                         skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
1789                                                        state,
1790                                                        cache->end_seq);
1791 
1792                         /* ...tail remains todo... */
1793                         if (tcp_highest_sack_seq(tp) == cache->end_seq) {
1794                                 /* ...but better entrypoint exists! */
1795                                 skb = tcp_highest_sack(sk);
1796                                 if (!skb)
1797                                         break;
1798                                 cache++;
1799                                 goto walk;
1800                         }
1801 
1802                         skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq);
1803                         /* Check overlap against next cached too (past this one already) */
1804                         cache++;
1805                         continue;
1806                 }
1807 
1808                 if (!before(start_seq, tcp_highest_sack_seq(tp))) {
1809                         skb = tcp_highest_sack(sk);
1810                         if (!skb)
1811                                 break;
1812                 }
1813                 skb = tcp_sacktag_skip(skb, sk, state, start_seq);
1814 
1815 walk:
1816                 skb = tcp_sacktag_walk(skb, sk, next_dup, state,
1817                                        start_seq, end_seq, dup_sack);
1818 
1819 advance_sp:
1820                 i++;
1821         }
1822 
1823         /* Clear the head of the cache sack blocks so we can skip it next time */
1824         for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
1825                 tp->recv_sack_cache[i].start_seq = 0;
1826                 tp->recv_sack_cache[i].end_seq = 0;
1827         }
1828         for (j = 0; j < used_sacks; j++)
1829                 tp->recv_sack_cache[i++] = sp[j];
1830 
1831         if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker)
1832                 tcp_check_sack_reordering(sk, state->reord, 0);
1833 
1834         tcp_verify_left_out(tp);
1835 out:
1836 
1837 #if FASTRETRANS_DEBUG > 0
1838         WARN_ON((int)tp->sacked_out < 0);
1839         WARN_ON((int)tp->lost_out < 0);
1840         WARN_ON((int)tp->retrans_out < 0);
1841         WARN_ON((int)tcp_packets_in_flight(tp) < 0);
1842 #endif
1843         return state->flag;
1844 }
1845 
1846 /* Limits sacked_out so that its sum with lost_out is never larger than
1847  * packets_out. Returns false if no sacked_out adjustment was necessary.
1848  */
1849 static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
1850 {
1851         u32 holes;
1852 
1853         holes = max(tp->lost_out, 1U);
1854         holes = min(holes, tp->packets_out);
1855 
1856         if ((tp->sacked_out + holes) > tp->packets_out) {
1857                 tp->sacked_out = tp->packets_out - holes;
1858                 return true;
1859         }
1860         return false;
1861 }
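
     /* Worked example (numbers invented): with packets_out = 10,
      * lost_out = 2 and sacked_out = 9, holes = 2 and 9 + 2 > 10, so
      * sacked_out is clamped to 8 and the caller interprets the excess
      * dupacks as reordering.
      */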
1862 
1863 /* If we receive more dupacks than expected while counting segments
1864  * under the assumption of no reordering, interpret this as reordering.
1865  * The only other possible cause is a bug in the receiver's TCP.
1866  */
1867 static void tcp_check_reno_reordering(struct sock *sk, const int addend)
1868 {
1869         struct tcp_sock *tp = tcp_sk(sk);
1870 
1871         if (!tcp_limit_reno_sacked(tp))
1872                 return;
1873 
1874         tp->reordering = min_t(u32, tp->packets_out + addend,
1875                                sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
1876         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
1877 }
1878 
1879 /* Emulate SACKs for SACKless connection: account for a new dupack. */
1880 
1881 static void tcp_add_reno_sack(struct sock *sk)
1882 {
1883         struct tcp_sock *tp = tcp_sk(sk);
1884         u32 prior_sacked = tp->sacked_out;
1885 
1886         tp->sacked_out++;
1887         tcp_check_reno_reordering(sk, 0);
1888         if (tp->sacked_out > prior_sacked)
1889                 tp->delivered++; /* Some out-of-order packet is delivered */
1890         tcp_verify_left_out(tp);
1891 }
1892 
1893 /* Account for ACK, ACKing some data in Reno Recovery phase. */
1894 
1895 static void tcp_remove_reno_sacks(struct sock *sk, int acked)
1896 {
1897         struct tcp_sock *tp = tcp_sk(sk);
1898 
1899         if (acked > 0) {
1900                 /* One ACK acked the hole. The rest eat duplicate ACKs. */
1901                 tp->delivered += max_t(int, acked - tp->sacked_out, 1);
1902                 if (acked - 1 >= tp->sacked_out)
1903                         tp->sacked_out = 0;
1904                 else
1905                         tp->sacked_out -= acked - 1;
1906         }
1907         tcp_check_reno_reordering(sk, acked);
1908         tcp_verify_left_out(tp);
1909 }
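
     /* Worked example (numbers invented): an ACK advances snd_una by
      * acked = 3 segments while sacked_out = 5.  delivered grows by
      * max(3 - 5, 1) = 1 (the hole itself) and sacked_out drops by
      * acked - 1 = 2, since those segments were already counted as
      * delivered when their emulated SACKs (dupacks) arrived.
      */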
1910 
1911 static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
1912 {
1913         tp->sacked_out = 0;
1914 }
1915 
1916 void tcp_clear_retrans(struct tcp_sock *tp)
1917 {
1918         tp->retrans_out = 0;
1919         tp->lost_out = 0;
1920         tp->undo_marker = 0;
1921         tp->undo_retrans = -1;
1922         tp->sacked_out = 0;
1923 }
1924 
1925 static inline void tcp_init_undo(struct tcp_sock *tp)
1926 {
1927         tp->undo_marker = tp->snd_una;
1928         /* Retransmission still in flight may cause DSACKs later. */
1929         tp->undo_retrans = tp->retrans_out ? : -1;
1930 }
1931 
1932 static bool tcp_is_rack(const struct sock *sk)
1933 {
1934         return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
1935 }
1936 
1937 /* If we detect SACK reneging, forget all SACK information
1938  * and reset tags completely; otherwise preserve SACKs. If the receiver
1939  * dropped its ofo queue, we will learn of this via reneging detection.
1940  */
1941 static void tcp_timeout_mark_lost(struct sock *sk)
1942 {
1943         struct tcp_sock *tp = tcp_sk(sk);
1944         struct sk_buff *skb, *head;
1945         bool is_reneg;                  /* is receiver reneging on SACKs? */
1946 
1947         head = tcp_rtx_queue_head(sk);
1948         is_reneg = head && (TCP_SKB_CB(head)->sacked & TCPCB_SACKED_ACKED);
1949         if (is_reneg) {
1950                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
1951                 tp->sacked_out = 0;
1952                 /* Mark SACK reneging until we recover from this loss event. */
1953                 tp->is_sack_reneg = 1;
1954         } else if (tcp_is_reno(tp)) {
1955                 tcp_reset_reno_sack(tp);
1956         }
1957 
1958         skb = head;
1959         skb_rbtree_walk_from(skb) {
1960                 if (is_reneg)
1961                         TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
1962                 else if (tcp_is_rack(sk) && skb != head &&
1963                          tcp_rack_skb_timeout(tp, skb, 0) > 0)
1964                         continue; /* Don't mark recently sent ones lost yet */
1965                 tcp_mark_skb_lost(sk, skb);
1966         }
1967         tcp_verify_left_out(tp);
1968         tcp_clear_all_retrans_hints(tp);
1969 }
1970 
1971 /* Enter Loss state. */
1972 void tcp_enter_loss(struct sock *sk)
1973 {
1974         const struct inet_connection_sock *icsk = inet_csk(sk);
1975         struct tcp_sock *tp = tcp_sk(sk);
1976         struct net *net = sock_net(sk);
1977         bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
1978 
1979         tcp_timeout_mark_lost(sk);
1980 
1981         /* Reduce ssthresh if it has not yet been made inside this window. */
1982         if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
1983             !after(tp->high_seq, tp->snd_una) ||
1984             (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1985                 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1986                 tp->prior_cwnd = tp->snd_cwnd;
1987                 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1988                 tcp_ca_event(sk, CA_EVENT_LOSS);
1989                 tcp_init_undo(tp);
1990         }
1991         tp->snd_cwnd       = tcp_packets_in_flight(tp) + 1;
1992         tp->snd_cwnd_cnt   = 0;
1993         tp->snd_cwnd_stamp = tcp_jiffies32;
1994 
1995         /* Timeout in disordered state after receiving substantial DUPACKs
1996          * suggests that the degree of reordering is over-estimated.
1997          */
1998         if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
1999             tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
2000                 tp->reordering = min_t(unsigned int, tp->reordering,
2001                                        net->ipv4.sysctl_tcp_reordering);
2002         tcp_set_ca_state(sk, TCP_CA_Loss);
2003         tp->high_seq = tp->snd_nxt;
2004         tcp_ecn_queue_cwr(tp);
2005 
2006         /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
2007          * loss recovery is underway except recurring timeout(s) on
2008          * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
2009          */
2010         tp->frto = net->ipv4.sysctl_tcp_frto &&
2011                    (new_recovery || icsk->icsk_retransmits) &&
2012                    !inet_csk(sk)->icsk_mtup.probe_size;
2013 }
2014 
2015 /* If an ACK arrived pointing to a remembered SACK, it means that our
2016  * remembered SACKs do not reflect the real state of the receiver, i.e.
2017  * the receiver _host_ is heavily congested (or buggy).
2018  *
2019  * To avoid big spurious retransmission bursts due to transient SACK
2020  * scoreboard oddities that look like reneging, we give the receiver a
2021  * little time (max(RTT/2, 10ms)) to send us some more ACKs that will
2022  * restore sanity to the SACK scoreboard. If the apparent reneging
2023  * persists until this RTO then we'll clear the SACK scoreboard.
2024  */
2025 static bool tcp_check_sack_reneging(struct sock *sk, int flag)
2026 {
2027         if (flag & FLAG_SACK_RENEGING) {
2028                 struct tcp_sock *tp = tcp_sk(sk);
2029                 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
2030                                           msecs_to_jiffies(10));
2031 
2032                 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2033                                           delay, TCP_RTO_MAX);
2034                 return true;
2035         }
2036         return false;
2037 }
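
     /* Example: with a smoothed RTT of 40 ms, tp->srtt_us holds
      * 8 * 40000 = 320000, so srtt_us >> 4 is 20000 us and the timer is
      * armed for roughly 20 ms; for RTTs below 20 ms the 10 ms floor
      * applies instead.
      */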
2038 
2039 /* Heuristics to calculate the number of duplicate ACKs. There's no dupACK
2040  * counter when SACK is enabled (without SACK, sacked_out is used for
2041  * that purpose).
2042  *
2043  * With reordering, holes may still be in flight, so RFC3517 recovery
2044  * uses pure sacked_out (total number of SACKed segments) even though
2045  * it violates the RFC, which uses duplicate ACKs. Often these are equal,
2046  * but when e.g. out-of-window ACKs or packet duplication occurs,
2047  * they differ. Since neither occurs due to loss, TCP should really
2048  * ignore them.
2049  */
2050 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
2051 {
2052         return tp->sacked_out + 1;
2053 }
2054 
2055 /* Linux NewReno/SACK/ECN state machine.
2056  * --------------------------------------
2057  *
2058  * "Open"       Normal state, no dubious events, fast path.
2059  * "Disorder"   In all respects it is "Open",
2060  *              but requires a bit more attention. It is entered when
2061  *              we see some SACKs or dupacks. It is split off from "Open"
2062  *              mainly to move some processing from the fast path to the slow one.
2063  * "CWR"        CWND was reduced due to some Congestion Notification event.
2064  *              It can be ECN, ICMP source quench, local device congestion.
2065  * "Recovery"   CWND was reduced, we are fast-retransmitting.
2066  * "Loss"       CWND was reduced due to RTO timeout or SACK reneging.
2067  *
2068  * tcp_fastretrans_alert() is entered:
2069  * - each incoming ACK, if state is not "Open"
2070  * - when the arriving ACK is unusual, namely:
2071  *      * SACK
2072  *      * Duplicate ACK.
2073  *      * ECN ECE.
2074  *
2075  * Counting packets in flight is pretty simple.
2076  *
2077  *      in_flight = packets_out - left_out + retrans_out
2078  *
2079  *      packets_out is SND.NXT-SND.UNA counted in packets.
2080  *
2081  *      retrans_out is the number of retransmitted segments.
2082  *
2083  *      left_out is the number of segments that left the network, not yet ACKed.
2084  *
2085  *              left_out = sacked_out + lost_out
2086  *
2087  *     sacked_out: Packets which arrived at the receiver out of order
2088  *                 and hence were not ACKed. With SACKs this number is simply
2089  *                 the amount of SACKed data. Even without SACKs
2090  *                 it is easy to give a pretty reliable estimate of this number
2091  *                 by counting duplicate ACKs.
2092  *
2093  *       lost_out: Packets lost by the network. TCP has no explicit
2094  *                 "loss notification" feedback from the network (for now).
2095  *                 It means that this number can only be _guessed_.
2096  *                 Actually, it is the heuristic used to predict losses that
2097  *                 distinguishes the different algorithms.
2098  *
2099  *      E.g. after an RTO, when the whole queue is considered lost,
2100  *      lost_out = packets_out and in_flight = retrans_out.
2101  *
2102  *              Essentially, we now have a few algorithms detecting
2103  *              lost packets.
2104  *
2105  *              If the receiver supports SACK:
2106  *
2107  *              RFC6675/3517: It is the conventional algorithm. A packet is
2108  *              considered lost if the number of higher sequence packets
2109  *              SACKed is greater than or equal to the DUPACK threshold
2110  *              (reordering). This is implemented in tcp_mark_head_lost and
2111  *              tcp_update_scoreboard.
2112  *
2113  *              RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm
2114  *              (2017-) that checks timing instead of counting DUPACKs.
2115  *              Essentially a packet is considered lost if it's not S/ACKed
2116  *              after RTT + reordering_window, where both metrics are
2117  *              dynamically measured and adjusted. This is implemented in
2118  *              tcp_rack_mark_lost.
2119  *
2120  *              If the receiver does not support SACK:
2121  *
2122  *              NewReno (RFC6582): in Recovery we assume that one segment
2123  *              is lost (classic Reno). While we are in Recovery and
2124  *              a partial ACK arrives, we assume that one more packet
2125  *              is lost (NewReno). These heuristics are the same in NewReno
2126  *              and SACK.
2127  *
2128  * The really tricky (and carefully tuned) part of the algorithm
2129  * is hidden in the functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
2130  * The first determines the moment _when_ we should reduce CWND and,
2131  * hence, slow down forward transmission. In fact, it determines the moment
2132  * when we decide that a hole is caused by loss rather than by reordering.
2133  *
2134  * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
2135  * holes, caused by lost packets.
2136  *
2137  * And the most logically complicated part of the algorithm is the undo
2138  * heuristics. We detect false retransmits due to both too early
2139  * fast retransmit (reordering) and underestimated RTO, analyzing
2140  * timestamps and D-SACKs. When we detect that some segments were
2141  * retransmitted by mistake and CWND reduction was wrong, we undo
2142  * window reduction and abort recovery phase. This logic is hidden
2143  * inside several functions named tcp_try_undo_<something>.
2144  */
2145 
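     /* A toy instance of the accounting above (numbers invented):
      * packets_out = 10 (SND.NXT - SND.UNA in segments), of which 3 are
      * SACKed, 2 are marked lost and 1 has been retransmitted:
      *
      *      left_out  = sacked_out + lost_out            = 3 + 2 = 5
      *      in_flight = packets_out - left_out + retrans_out
      *                = 10 - 5 + 1                        = 6
      *
      * which is exactly what tcp_packets_in_flight() computes.
      */
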
2146 /* This function decides when we should leave Disordered state
2147  * and enter Recovery phase, reducing congestion window.
2148  *
2149  * Main question: may we further continue forward transmission
2150  * with the same cwnd?
2151  */
2152 static bool tcp_time_to_recover(struct sock *sk, int flag)
2153 {
2154         struct tcp_sock *tp = tcp_sk(sk);
2155 
2156         /* Trick#1: The loss is proven. */
2157         if (tp->lost_out)
2158                 return true;
2159 
2160         /* Not-A-Trick#2 : Classic rule... */
2161         if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering)
2162                 return true;
2163 
2164         return false;
2165 }
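
     /* Example: with the default reordering of 3, tcp_dupack_heuristics()
      * returns sacked_out + 1, so the classic rule above fires once three
      * segments above a hole have been SACKed (or, on a SACKless
      * connection, once three dupacks have arrived), i.e. 3 + 1 > 3.
      */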
2166 
2167 /* Detect loss in event "A" above by marking head of queue up as lost.
2168  * For non-SACK(Reno) senders, the first "packets" number of segments
2169  * are considered lost. For RFC3517 SACK, a segment is considered lost if it
2170  * has at least tp->reordering SACKed segments above it; "packets" refers to
2171  * the maximum SACKed segments to pass before reaching this limit.
2172  */
2173 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2174 {
2175         struct tcp_sock *tp = tcp_sk(sk);
2176         struct sk_buff *skb;
2177         int cnt, oldcnt, lost;
2178         unsigned int mss;
2179         /* Use SACK to deduce losses of new sequences sent during recovery */
2180         const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;
2181 
2182         WARN_ON(packets > tp->packets_out);
2183         skb = tp->lost_skb_hint;
2184         if (skb) {
2185                 /* Head already handled? */
2186                 if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una))
2187                         return;
2188                 cnt = tp->lost_cnt_hint;
2189         } else {
2190                 skb = tcp_rtx_queue_head(sk);
2191                 cnt = 0;
2192         }
2193 
2194         skb_rbtree_walk_from(skb) {
2195                 /* TODO: do this better */
2196                 /* this is not the most efficient way to do this... */
2197                 tp->lost_skb_hint = skb;
2198                 tp->lost_cnt_hint = cnt;
2199 
2200                 if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
2201                         break;
2202 
2203                 oldcnt = cnt;
2204                 if (tcp_is_reno(tp) ||
2205                     (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
2206                         cnt += tcp_skb_pcount(skb);
2207 
2208                 if (cnt > packets) {
2209                         if (tcp_is_sack(tp) ||
2210                             (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
2211                             (oldcnt >= packets))
2212                                 break;
2213 
2214                         mss = tcp_skb_mss(skb);
2215                         /* If needed, chop off the prefix to mark as lost. */
2216                         lost = (packets - oldcnt) * mss;
2217                         if (lost < skb->len &&
2218                             tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
2219                                          lost, mss, GFP_ATOMIC) < 0)
2220                                 break;
2221                         cnt = packets;
2222                 }
2223 
2224                 tcp_skb_mark_lost(tp, skb);
2225 
2226                 if (mark_head)
2227                         break;
2228         }
2229         tcp_verify_left_out(tp);
2230 }
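
     /* Worked example (numbers invented, non-SACK flow): packets = 3 and
      * the walk reaches a 4-segment skb (mss = 1000) with cnt still 0.
      * cnt becomes 4 > 3, so the skb is split at
      * (packets - oldcnt) * mss = 3000 bytes and only the 3000-byte head
      * is marked lost on this pass, leaving the fourth segment alone.
      */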
2231 
2232 /* Account newly detected lost packet(s) */
2233 
2234 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2235 {
2236         struct tcp_sock *tp = tcp_sk(sk);
2237 
2238         if (tcp_is_sack(tp)) {
2239                 int sacked_upto = tp->sacked_out - tp->reordering;
2240                 if (sacked_upto >= 0)
2241                         tcp_mark_head_lost(sk, sacked_upto, 0);
2242                 else if (fast_rexmit)
2243                         tcp_mark_head_lost(sk, 1, 1);
2244         }
2245 }
2246 
2247 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
2248 {
2249         return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2250                before(tp->rx_opt.rcv_tsecr, when);
2251 }
2252 
2253 /* skb was spuriously retransmitted if the returned timestamp echo
2254  * reply is prior to the skb transmission time.
2255  */
2256 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
2257                                      const struct sk_buff *skb)
2258 {
2259         return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
2260                tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
2261 }
2262 
2263 /* Nothing was retransmitted, or the returned timestamp is earlier
2264  * than the timestamp of the first retransmission.
2265  */
2266 static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
2267 {
2268         return !tp->retrans_stamp ||
2269                tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
2270 }
2271 
2272 /* Undo procedures. */
2273 
2274 /* We can clear retrans_stamp when there are no retransmissions in the
2275  * window. It would seem that it is trivially available for us in
2276  * tp->retrans_out, however, that kind of assumption doesn't consider
2277  * what will happen if errors occur when sending a retransmission for the
2278  * second time. ...It could be that such a segment has only
2279  * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2280  * the head skb is enough except for some reneging corner cases that
2281  * are not worth the effort.
2282  *
2283  * Main reason for all this complexity is the fact that connection dying
2284  * time now depends on the validity of the retrans_stamp, in particular,
2285  * that successive retransmissions of a segment must not advance
2286  * retrans_stamp under any conditions.
2287  */
2288 static bool tcp_any_retrans_done(const struct sock *sk)
2289 {
2290         const struct tcp_sock *tp = tcp_sk(sk);
2291         struct sk_buff *skb;
2292 
2293         if (tp->retrans_out)
2294                 return true;
2295 
2296         skb = tcp_rtx_queue_head(sk);
2297         if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2298                 return true;
2299 
2300         return false;
2301 }
2302 
2303 static void DBGUNDO(struct sock *sk, const char *msg)
2304 {
2305 #if FASTRETRANS_DEBUG > 1
2306         struct tcp_sock *tp = tcp_sk(sk);
2307         struct inet_sock *inet = inet_sk(sk);
2308 
2309         if (sk->sk_family == AF_INET) {
2310                 pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
2311                          msg,
2312                          &inet->inet_daddr, ntohs(inet->inet_dport),
2313                          tp->snd_cwnd, tcp_left_out(tp),
2314                          tp->snd_ssthresh, tp->prior_ssthresh,
2315                          tp->packets_out);
2316         }
2317 #if IS_ENABLED(CONFIG_IPV6)
2318         else if (sk->sk_family == AF_INET6) {
2319                 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
2320                          msg,
2321                          &sk->sk_v6_daddr, ntohs(inet->inet_dport),
2322                          tp->snd_cwnd, tcp_left_out(tp),
2323                          tp->snd_ssthresh, tp->prior_ssthresh,
2324                          tp->packets_out);
2325         }
2326 #endif
2327 #endif
2328 }
2329 
2330 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
2331 {
2332         struct tcp_sock *tp = tcp_sk(sk);
2333 
2334         if (unmark_loss) {
2335                 struct sk_buff *skb;
2336 
2337                 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
2338                         TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
2339                 }
2340                 tp->lost_out = 0;
2341                 tcp_clear_all_retrans_hints(tp);
2342         }
2343 
2344         if (tp->prior_ssthresh) {
2345                 const struct inet_connection_sock *icsk = inet_csk(sk);
2346 
2347                 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
2348 
2349                 if (tp->prior_ssthresh > tp->snd_ssthresh) {
2350                         tp->snd_ssthresh = tp->prior_ssthresh;
2351                         tcp_ecn_withdraw_cwr(tp);
2352                 }
2353         }
2354         tp->snd_cwnd_stamp = tcp_jiffies32;
2355         tp->undo_marker = 0;
2356         tp->rack.advanced = 1; /* Force RACK to re-exam losses */
2357 }
2358 
2359 static inline bool tcp_may_undo(const struct tcp_sock *tp)
2360 {
2361         return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2362 }
2363 
2364 /* People celebrate: "We love our President!" */
2365 static bool tcp_try_undo_recovery(struct sock *sk)
2366 {
2367         struct tcp_sock *tp = tcp_sk(sk);
2368 
2369         if (tcp_may_undo(tp)) {
2370                 int mib_idx;
2371 
2372                 /* Happy end! We did not retransmit anything
2373                  * or our original transmission succeeded.
2374                  */
2375                 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
2376                 tcp_undo_cwnd_reduction(sk, false);
2377                 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
2378                         mib_idx = LINUX_MIB_TCPLOSSUNDO;
2379                 else
2380                         mib_idx = LINUX_MIB_TCPFULLUNDO;
2381 
2382                 NET_INC_STATS(sock_net(sk), mib_idx);
2383         } else if (tp->rack.reo_wnd_persist) {
2384                 tp->rack.reo_wnd_persist--;
2385         }
2386         if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2387                 /* Hold old state until something *above* high_seq
2388                  * is ACKed. For Reno it is a MUST to prevent false
2389                  * fast retransmits (RFC2582). SACK TCP is safe. */
2390                 if (!tcp_any_retrans_done(sk))
2391                         tp->retrans_stamp = 0;
2392                 return true;
2393         }
2394         tcp_set_ca_state(sk, TCP_CA_Open);
2395         tp->is_sack_reneg = 0;
2396         return false;
2397 }
2398 
2399 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
2400 static bool tcp_try_undo_dsack(struct sock *sk)
2401 {
2402         struct tcp_sock *tp = tcp_sk(sk);
2403 
2404         if (tp->undo_marker && !tp->undo_retrans) {
2405                 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH,
2406                                                tp->rack.reo_wnd_persist + 1);
2407                 DBGUNDO(sk, "D-SACK");
2408                 tcp_undo_cwnd_reduction(sk, false);
2409                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
2410                 return true;
2411         }
2412         return false;
2413 }
2414 
2415 /* Undo during loss recovery after partial ACK or using F-RTO. */
2416 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2417 {
2418         struct tcp_sock *tp = tcp_sk(sk);
2419 
2420         if (frto_undo || tcp_may_undo(tp)) {
2421                 tcp_undo_cwnd_reduction(sk, true);
2422 
2423                 DBGUNDO(sk, "partial loss");
2424                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
2425                 if (frto_undo)
2426                         NET_INC_STATS(sock_net(sk),
2427                                         LINUX_MIB_TCPSPURIOUSRTOS);
2428                 inet_csk(sk)->icsk_retransmits = 0;
2429                 if (frto_undo || tcp_is_sack(tp)) {
2430                         tcp_set_ca_state(sk, TCP_CA_Open);
2431                         tp->is_sack_reneg = 0;
2432                 }
2433                 return true;
2434         }
2435         return false;
2436 }
2437 
2438 /* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
2439  * It computes the number of packets to send (sndcnt) based on packets newly
2440  * delivered:
2441  *   1) If the packets in flight is larger than ssthresh, PRR spreads the
2442  *      cwnd reductions across a full RTT.
2443  *   2) Otherwise PRR uses packet conservation to send as much as delivered.
2444  *      But when the retransmits are acked without further losses, PRR
2445  *      slow starts cwnd up to ssthresh to speed up the recovery.
2446  */
2447 static void tcp_init_cwnd_reduction(struct sock *sk)
2448 {
2449         struct tcp_sock *tp = tcp_sk(sk);
2450 
2451         tp->high_seq = tp->snd_nxt;
2452         tp->tlp_high_seq = 0;
2453         tp->snd_cwnd_cnt = 0;
2454         tp->prior_cwnd = tp->snd_cwnd;
2455         tp->prr_delivered = 0;
2456         tp->prr_out = 0;
2457         tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
2458         tcp_ecn_queue_cwr(tp);
2459 }
2460 
2461 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
2462 {
2463         struct tcp_sock *tp = tcp_sk(sk);
2464         int sndcnt = 0;
2465         int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
2466 
2467         if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
2468                 return;
2469 
2470         tp->prr_delivered += newly_acked_sacked;
2471         if (delta < 0) {
2472                 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
2473                                tp->prior_cwnd - 1;
2474                 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
2475         } else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
2476                    !(flag & FLAG_LOST_RETRANS)) {
2477                 sndcnt = min_t(int, delta,
2478                                max_t(int, tp->prr_delivered - tp->prr_out,
2479                                      newly_acked_sacked) + 1);
2480         } else {
2481                 sndcnt = min(delta, newly_acked_sacked);
2482         }
2483         /* Force a fast retransmit upon entering fast recovery */
2484         sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
2485         tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
2486 }
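
     /* A minimal sketch of the proportional PRR step above, pulled out so
      * the ceiling division is easy to follow.  prr_proportional_sndcnt()
      * is a hypothetical helper for illustration only, not part of this
      * file.
      */
     static inline u32 prr_proportional_sndcnt(u32 ssthresh, u32 prior_cwnd,
                                               u32 prr_delivered, u32 prr_out)
     {
             /* ceil(ssthresh * prr_delivered / prior_cwnd) - prr_out */
             u64 dividend = (u64)ssthresh * prr_delivered + prior_cwnd - 1;

             /* e.g. ssthresh = 10, prior_cwnd = 20, prr_delivered = 4,
              * prr_out = 1  ->  ceil(40 / 20) - 1 = 1 extra segment.
              */
             return div_u64(dividend, prior_cwnd) - prr_out;
     }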
2487 
2488 static inline void tcp_end_cwnd_reduction(struct sock *sk)
2489 {
2490         struct tcp_sock *tp = tcp_sk(sk);
2491 
2492         if (inet_csk(sk)->icsk_ca_ops->cong_control)
2493                 return;
2494 
2495         /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
2496         if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
2497             (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
2498                 tp->snd_cwnd = tp->snd_ssthresh;
2499                 tp->snd_cwnd_stamp = tcp_jiffies32;
2500         }
2501         tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2502 }
2503 
2504 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
2505 void tcp_enter_cwr(struct sock *sk)
2506 {
2507         struct tcp_sock *tp = tcp_sk(sk);
2508 
2509         tp->prior_ssthresh = 0;
2510         if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2511                 tp->undo_marker = 0;
2512                 tcp_init_cwnd_reduction(sk);
2513                 tcp_set_ca_state(sk, TCP_CA_CWR);
2514         }
2515 }
2516 EXPORT_SYMBOL(tcp_enter_cwr);
2517 
2518 static void tcp_try_keep_open(struct sock *sk)
2519 {
2520         struct tcp_sock *tp = tcp_sk(sk);
2521         int state = TCP_CA_Open;
2522 
2523         if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
2524                 state = TCP_CA_Disorder;
2525 
2526         if (inet_csk(sk)->icsk_ca_state != state) {
2527                 tcp_set_ca_state(sk, state);
2528                 tp->high_seq = tp->snd_nxt;
2529         }
2530 }
2531 
2532 static void tcp_try_to_open(struct sock *sk, int flag)
2533 {
2534         struct tcp_sock *tp = tcp_sk(sk);
2535 
2536         tcp_verify_left_out(tp);
2537 
2538         if (!tcp_any_retrans_done(sk))
2539                 tp->retrans_stamp = 0;
2540 
2541         if (flag & FLAG_ECE)
2542                 tcp_enter_cwr(sk);
2543 
2544         if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
2545                 tcp_try_keep_open(sk);
2546         }
2547 }
2548 
2549 static void tcp_mtup_probe_failed(struct sock *sk)
2550 {
2551         struct inet_connection_sock *icsk = inet_csk(sk);
2552 
2553         icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
2554         icsk->icsk_mtup.probe_size = 0;
2555         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
2556 }
2557 
2558 static void tcp_mtup_probe_success(struct sock *sk)
2559 {
2560         struct tcp_sock *tp = tcp_sk(sk);
2561         struct inet_connection_sock *icsk = inet_csk(sk);
2562 
2563         /* FIXME: breaks with very large cwnd */
2564         tp->prior_ssthresh = tcp_current_ssthresh(sk);
2565         tp->snd_cwnd = tp->snd_cwnd *
2566                        tcp_mss_to_mtu(sk, tp->mss_cache) /
2567                        icsk->icsk_mtup.probe_size;
2568         tp->snd_cwnd_cnt = 0;
2569         tp->snd_cwnd_stamp = tcp_jiffies32;
2570         tp->snd_ssthresh = tcp_current_ssthresh(sk);
2571 
2572         icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
2573         icsk->icsk_mtup.probe_size = 0;
2574         tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
2575         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
2576 }
2577 
2578 /* Do a simple retransmit without using the backoff mechanisms in
2579  * tcp_timer. This is used for path mtu discovery.
2580  * The socket is already locked here.
2581  */
2582 void tcp_simple_retransmit(struct sock *sk)
2583 {
2584         const struct inet_connection_sock *icsk = inet_csk(sk);
2585         struct tcp_sock *tp = tcp_sk(sk);
2586         struct sk_buff *skb;
2587         unsigned int mss = tcp_current_mss(sk);
2588 
2589         skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
2590                 if (tcp_skb_seglen(skb) > mss &&
2591                     !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
2592                         if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2593                                 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
2594                                 tp->retrans_out -= tcp_skb_pcount(skb);
2595                         }
2596                         tcp_skb_mark_lost_uncond_verify(tp, skb);
2597                 }
2598         }
2599 
2600         tcp_clear_retrans_hints_partial(tp);
2601 
2602         if (!tp->lost_out)
2603                 return;
2604 
2605         if (tcp_is_reno(tp))
2606                 tcp_limit_reno_sacked(tp);
2607 
2608         tcp_verify_left_out(tp);
2609 
2610         /* Don't muck with the congestion window here.
2611          * The reason is that we do not increase the amount of _data_
2612          * in the network, but the units have changed and the effective
2613          * cwnd/ssthresh are really reduced now.
2614          */
2615         if (icsk->icsk_ca_state != TCP_CA_Loss) {
2616                 tp->high_seq = tp->snd_nxt;
2617                 tp->snd_ssthresh = tcp_current_ssthresh(sk);
2618                 tp->prior_ssthresh = 0;
2619                 tp->undo_marker = 0;
2620                 tcp_set_ca_state(sk, TCP_CA_Loss);
2621         }
2622         tcp_xmit_retransmit_queue(sk);
2623 }
2624 EXPORT_SYMBOL(tcp_simple_retransmit);
2625 
2626 void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2627 {
2628         struct tcp_sock *tp = tcp_sk(sk);
2629         int mib_idx;
2630 
2631         if (tcp_is_reno(tp))
2632                 mib_idx = LINUX_MIB_TCPRENORECOVERY;
2633         else
2634                 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
2635 
2636         NET_INC_STATS(sock_net(sk), mib_idx);
2637 
2638         tp->prior_ssthresh = 0;
2639         tcp_init_undo(tp);
2640 
2641         if (!tcp_in_cwnd_reduction(sk)) {
2642                 if (!ece_ack)
2643                         tp->prior_ssthresh = tcp_current_ssthresh(sk);
2644                 tcp_init_cwnd_reduction(sk);
2645         }
2646         tcp_set_ca_state(sk, TCP_CA_Recovery);
2647 }
2648 
2649 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
2650  * recovered or spurious. Otherwise retransmits more on partial ACKs.
2651  */
2652 static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
2653                              int *rexmit)
2654 {
2655         struct tcp_sock *tp = tcp_sk(sk);
2656         bool recovered = !before(tp->snd_una, tp->high_seq);
2657 
2658         if ((flag & FLAG_SND_UNA_ADVANCED) &&
2659             tcp_try_undo_loss(sk, false))
2660                 return;
2661 
2662         if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
2663                 /* Step 3.b. A timeout is spurious if not all data are
2664                  * lost, i.e., never-retransmitted data are (s)acked.
2665                  */
2666                 if ((flag & FLAG_ORIG_SACK_ACKED) &&
2667                     tcp_try_undo_loss(sk, true))
2668                         return;
2669 
2670                 if (after(tp->snd_nxt, tp->high_seq)) {
2671                         if (flag & FLAG_DATA_SACKED || is_dupack)
2672                                 tp->frto = 0; /* Step 3.a. loss was real */
2673                 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
2674                         tp->high_seq = tp->snd_nxt;
2675                         /* Step 2.b. Try to send new data (but deferred until cwnd
2676                          * is updated in tcp_ack()). Otherwise fall back to
2677                          * the conventional recovery.
2678                          */
2679                         if (!tcp_write_queue_empty(sk) &&
2680                             after(tcp_wnd_end(tp), tp->snd_nxt)) {
2681                                 *rexmit = REXMIT_NEW;
2682                                 return;
2683                         }
2684                         tp->frto = 0;
2685                 }
2686         }
2687 
2688         if (recovered) {
2689                 /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
2690                 tcp_try_undo_recovery(sk);
2691                 return;
2692         }
2693         if (tcp_is_reno(tp)) {
2694                 /* A Reno DUPACK means new data in F-RTO step 2.b above are
2695                  * delivered. Lower inflight to clock out (re)transmissions.
2696                  */
2697                 if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
2698                         tcp_add_reno_sack(sk);
2699                 else if (flag & FLAG_SND_UNA_ADVANCED)
2700                         tcp_reset_reno_sack(tp);
2701         }
2702         *rexmit = REXMIT_LOST;
2703 }
2704 
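/* [Editor's sketch -- not part of tcp_input.c] The F-RTO handling above boils
 * down to a small decision over what the post-RTO ACKs report.  A hedged
 * restatement of the RFC 5682 steps referenced in the comments, with
 * illustrative names (the real code also interleaves the undo machinery and
 * Reno dupack accounting):
 */
enum frto_verdict_sketch { FRTO_UNDO, FRTO_REAL_LOSS, FRTO_SEND_NEW, FRTO_WAIT };

static enum frto_verdict_sketch frto_step_sketch(bool orig_data_sacked,
						 bool sent_after_rto,
						 bool dupack_or_sack,
						 bool una_advanced)
{
	if (orig_data_sacked)
		return FRTO_UNDO;	/* 3.b: never-retransmitted data was (s)acked -> spurious */
	if (sent_after_rto && dupack_or_sack)
		return FRTO_REAL_LOSS;	/* 3.a: loss confirmed, stop trying to undo */
	if (!sent_after_rto && una_advanced)
		return FRTO_SEND_NEW;	/* 2.b: probe the network with new data */
	return FRTO_WAIT;		/* keep waiting for more evidence */
}
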
2705 /* Undo during fast recovery after partial ACK. */
2706 static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
2707 {
2708         struct tcp_sock *tp = tcp_sk(sk);
2709 
2710         if (tp->undo_marker && tcp_packet_delayed(tp)) {
2711                 /* Plain luck! The hole is filled with a delayed
2712                  * packet, rather than with a retransmit. Check reordering.
2713                  */
2714                 tcp_check_sack_reordering(sk, prior_snd_una, 1);
2715 
2716                 /* We are getting evidence that the reordering degree is higher
2717                  * than we realized. If there are no retransmits out then we
2718                  * can undo. Otherwise we clock out new packets but do not
2719                  * mark more packets lost or retransmit more.
2720                  */
2721                 if (tp->retrans_out)
2722                         return true;
2723 
2724                 if (!tcp_any_retrans_done(sk))
2725                         tp->retrans_stamp = 0;
2726 
2727                 DBGUNDO(sk, "partial recovery");
2728                 tcp_undo_cwnd_reduction(sk, true);
2729                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
2730                 tcp_try_keep_open(sk);
2731                 return true;
2732         }
2733         return false;
2734 }
2735 
2736 static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
2737 {
2738         struct tcp_sock *tp = tcp_sk(sk);
2739 
2740         if (tcp_rtx_queue_empty(sk))
2741                 return;
2742 
2743         if (unlikely(tcp_is_reno(tp))) {
2744                 tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED);
2745         } else if (tcp_is_rack(sk)) {
2746                 u32 prior_retrans = tp->retrans_out;
2747 
2748                 tcp_rack_mark_lost(sk);
2749                 if (prior_retrans > tp->retrans_out)
2750                         *ack_flag |= FLAG_LOST_RETRANS;
2751         }
2752 }
2753 
2754 static bool tcp_force_fast_retransmit(struct sock *sk)
2755 {
2756         struct tcp_sock *tp = tcp_sk(sk);
2757 
2758         return after(tcp_highest_sack_seq(tp),
2759                      tp->snd_una + tp->reordering * tp->mss_cache);
2760 }
2761 
2762 /* Process an event, which can update packets-in-flight not trivially.
2763  * The main goal of this function is to calculate a new estimate for left_out,
2764  * taking into account both packets sitting in the receiver's buffer and
2765  * packets lost by the network.
2766  *
2767  * Besides that it updates the congestion state when packet loss or ECN
2768  * is detected. But it does not reduce the cwnd, it is done by the
2769  * congestion control later.
2770  *
2771  * It does _not_ decide what to send; that is done in
2772  * tcp_xmit_retransmit_queue().
2773  */
2774 static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
2775                                   bool is_dupack, int *ack_flag, int *rexmit)
2776 {
2777         struct inet_connection_sock *icsk = inet_csk(sk);
2778         struct tcp_sock *tp = tcp_sk(sk);
2779         int fast_rexmit = 0, flag = *ack_flag;
2780         bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
2781                                      tcp_force_fast_retransmit(sk));
2782 
2783         if (!tp->packets_out && tp->sacked_out)
2784                 tp->sacked_out = 0;
2785 
2786         /* Now state machine starts.
2787          * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
2788         if (flag & FLAG_ECE)
2789                 tp->prior_ssthresh = 0;
2790 
2791         /* B. In all the states check for reneging SACKs. */
2792         if (tcp_check_sack_reneging(sk, flag))
2793                 return;
2794 
2795         /* C. Check consistency of the current state. */
2796         tcp_verify_left_out(tp);
2797 
2798         /* D. Check state exit conditions. State can be terminated
2799          *    when high_seq is ACKed. */
2800         if (icsk->icsk_ca_state == TCP_CA_Open) {
2801                 WARN_ON(tp->retrans_out != 0);
2802                 tp->retrans_stamp = 0;
2803         } else if (!before(tp->snd_una, tp->high_seq)) {
2804                 switch (icsk->icsk_ca_state) {
2805                 case TCP_CA_CWR:
2806                         /* CWR must be held until something *above* high_seq
2807                          * is ACKed, so the CWR bit reaches the receiver. */
2808                         if (tp->snd_una != tp->high_seq) {
2809                                 tcp_end_cwnd_reduction(sk);
2810                                 tcp_set_ca_state(sk, TCP_CA_Open);
2811                         }
2812                         break;
2813 
2814                 case TCP_CA_Recovery:
2815                         if (tcp_is_reno(tp))
2816                                 tcp_reset_reno_sack(tp);
2817                         if (tcp_try_undo_recovery(sk))
2818                                 return;
2819                         tcp_end_cwnd_reduction(sk);
2820                         break;
2821                 }
2822         }
2823 
2824         /* E. Process state. */
2825         switch (icsk->icsk_ca_state) {
2826         case TCP_CA_Recovery:
2827                 if (!(flag & FLAG_SND_UNA_ADVANCED)) {
2828                         if (tcp_is_reno(tp) && is_dupack)
2829                                 tcp_add_reno_sack(sk);
2830                 } else {
2831                         if (tcp_try_undo_partial(sk, prior_snd_una))
2832                                 return;
2833                         /* Partial ACK arrived. Force fast retransmit. */
2834                         do_lost = tcp_is_reno(tp) ||
2835                                   tcp_force_fast_retransmit(sk);
2836                 }
2837                 if (tcp_try_undo_dsack(sk)) {
2838                         tcp_try_keep_open(sk);
2839                         return;
2840                 }
2841                 tcp_identify_packet_loss(sk, ack_flag);
2842                 break;
2843         case TCP_CA_Loss:
2844                 tcp_process_loss(sk, flag, is_dupack, rexmit);
2845                 tcp_identify_packet_loss(sk, ack_flag);
2846                 if (!(icsk->icsk_ca_state == TCP_CA_Open ||
2847                       (*ack_flag & FLAG_LOST_RETRANS)))
2848                         return;
2849                 /* Change state if cwnd is undone or retransmits are lost */
2850                 /* fall through */
2851         default:
2852                 if (tcp_is_reno(tp)) {
2853                         if (flag & FLAG_SND_UNA_ADVANCED)
2854                                 tcp_reset_reno_sack(tp);
2855                         if (is_dupack)
2856                                 tcp_add_reno_sack(sk);
2857                 }
2858 
2859                 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
2860                         tcp_try_undo_dsack(sk);
2861 
2862                 tcp_identify_packet_loss(sk, ack_flag);
2863                 if (!tcp_time_to_recover(sk, flag)) {
2864                         tcp_try_to_open(sk, flag);
2865                         return;
2866                 }
2867 
2868                 /* MTU probe failure: don't reduce cwnd */
2869                 if (icsk->icsk_ca_state < TCP_CA_CWR &&
2870                     icsk->icsk_mtup.probe_size &&
2871                     tp->snd_una == tp->mtu_probe.probe_seq_start) {
2872                         tcp_mtup_probe_failed(sk);
2873                         /* Restores the reduction we did in tcp_mtup_probe() */
2874                         tp->snd_cwnd++;
2875                         tcp_simple_retransmit(sk);
2876                         return;
2877                 }
2878 
2879                 /* Otherwise enter Recovery state */
2880                 tcp_enter_recovery(sk, (flag & FLAG_ECE));
2881                 fast_rexmit = 1;
2882         }
2883 
2884         if (!tcp_is_rack(sk) && do_lost)
2885                 tcp_update_scoreboard(sk, fast_rexmit);
2886         *rexmit = REXMIT_LOST;
2887 }
2888 
2889 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
2890 {
2891         u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
2892         struct tcp_sock *tp = tcp_sk(sk);
2893 
2894         if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
2895                 /* If the remote keeps returning delayed ACKs, eventually
2896                  * the min filter would pick it up and overestimate the
2897                  * prop. delay when it expires. Skip suspected delayed ACKs.
2898                  */
2899                 return;
2900         }
2901         minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
2902                            rtt_us ? : jiffies_to_usecs(1));
2903 }
2904 
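/* [Editor's sketch -- not part of tcp_input.c] tcp_update_rtt_min() feeds a
 * windowed min filter (the kernel's lib/win_minmax.c keeps up to three
 * samples).  A simplified single-sample version of the same idea, with
 * illustrative names: remember the smallest RTT seen, but let it expire
 * after wlen so a route change can raise the estimate again.
 */
struct min_rtt_sketch {
	unsigned int rtt_us;	/* 0 means "no sample yet" */
	unsigned int stamp;	/* time the current minimum was taken */
};

static void min_rtt_update_sketch(struct min_rtt_sketch *m, unsigned int now,
				  unsigned int wlen, unsigned int rtt_us)
{
	if (!m->rtt_us || rtt_us <= m->rtt_us || (now - m->stamp) > wlen) {
		m->rtt_us = rtt_us;
		m->stamp = now;
	}
}
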
2905 static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
2906                                long seq_rtt_us, long sack_rtt_us,
2907                                long ca_rtt_us, struct rate_sample *rs)
2908 {
2909         const struct tcp_sock *tp = tcp_sk(sk);
2910 
2911         /* Prefer RTT measured from ACK's timing to TS-ECR. This is because
2912          * broken middle-boxes or peers may corrupt TS-ECR fields. But
2913          * Karn's algorithm forbids taking RTT if some retransmitted data
2914          * is acked (RFC6298).
2915          */
2916         if (seq_rtt_us < 0)
2917                 seq_rtt_us = sack_rtt_us;
2918 
2919         /* RTTM Rule: A TSecr value received in a segment is used to
2920          * update the averaged RTT measurement only if the segment
2921          * acknowledges some new data, i.e., only if it advances the
2922          * left edge of the send window.
2923          * See draft-ietf-tcplw-high-performance-00, section 3.3.
2924          */
2925         if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2926             flag & FLAG_ACKED) {
2927                 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
2928                 u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
2929 
2930                 seq_rtt_us = ca_rtt_us = delta_us;
2931         }
2932         rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
2933         if (seq_rtt_us < 0)
2934                 return false;
2935 
2936         /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
2937          * always taken together with ACK, SACK, or TS-opts. Any negative
2938          * values will be skipped with the seq_rtt_us < 0 check above.
2939          */
2940         tcp_update_rtt_min(sk, ca_rtt_us, flag);
2941         tcp_rtt_estimator(sk, seq_rtt_us);
2942         tcp_set_rto(sk);
2943 
2944         /* RFC6298: only reset backoff on valid RTT measurement. */
2945         inet_csk(sk)->icsk_backoff = 0;
2946         return true;
2947 }
2948 
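/* [Editor's sketch -- not part of tcp_input.c] The timestamp fallback in
 * tcp_ack_update_rtt() converts the echoed TSecr delta, which ticks at
 * TCP_TS_HZ (1000 Hz in this kernel version), into microseconds before
 * feeding the RTT estimator.  Standalone restatement with illustrative names:
 */
static long rtt_from_tsecr_sketch(unsigned int now_ts, unsigned int rcv_tsecr,
				  unsigned int ts_hz)
{
	unsigned int delta = now_ts - rcv_tsecr;	/* u32 arithmetic handles wrap */

	return (long)delta * (1000000 / ts_hz);		/* ticks -> microseconds */
}
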
2949 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
2950 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
2951 {
2952         struct rate_sample rs;
2953         long rtt_us = -1L;
2954 
2955         if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
2956                 rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
2957 
2958         tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs);
2959 }
2960 
2961 
2962 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
2963 {
2964         const struct inet_connection_sock *icsk = inet_csk(sk);
2965 
2966         icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
2967         tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32;
2968 }
2969 
2970 /* Restart timer after forward progress on connection.
2971  * RFC2988 recommends to restart timer to now+rto.
2972  */
2973 void tcp_rearm_rto(struct sock *sk)
2974 {
2975         const struct inet_connection_sock *icsk = inet_csk(sk);
2976         struct tcp_sock *tp = tcp_sk(sk);
2977 
2978         /* If the retrans timer is currently being used by Fast Open
2979          * for SYN-ACK retrans purpose, stay put.
2980          */
2981         if (tp->fastopen_rsk)
2982                 return;
2983 
2984         if (!tp->packets_out) {
2985                 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
2986         } else {
2987                 u32 rto = inet_csk(sk)->icsk_rto;
2988                 /* Offset the time elapsed after installing regular RTO */
2989                 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2990                     icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2991                         s64 delta_us = tcp_rto_delta_us(sk);
2992                         /* delta_us may not be positive if the socket is locked
2993                          * when the retrans timer fires and is rescheduled.
2994                          */
2995                         rto = usecs_to_jiffies(max_t(int, delta_us, 1));
2996                 }
2997                 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
2998                                           TCP_RTO_MAX);
2999         }
3000 }
3001 
3002 /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
3003 static void tcp_set_xmit_timer(struct sock *sk)
3004 {
3005         if (!tcp_schedule_loss_probe(sk, true))
3006                 tcp_rearm_rto(sk);
3007 }
3008 
3009 /* If we get here, the whole TSO packet has not been acked. */
3010 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
3011 {
3012         struct tcp_sock *tp = tcp_sk(sk);
3013         u32 packets_acked;
3014 
3015         BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));
3016 
3017         packets_acked = tcp_skb_pcount(skb);
3018         if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3019                 return 0;
3020         packets_acked -= tcp_skb_pcount(skb);
3021 
3022         if (packets_acked) {
3023                 BUG_ON(tcp_skb_pcount(skb) == 0);
3024                 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
3025         }
3026 
3027         return packets_acked;
3028 }
3029 
3030 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3031                            u32 prior_snd_una)
3032 {
3033         const struct skb_shared_info *shinfo;
3034 
3035         /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
3036         if (likely(!TCP_SKB_CB(skb)->txstamp_ack))
3037                 return;
3038 
3039         shinfo = skb_shinfo(skb);
3040         if (!before(shinfo->tskey, prior_snd_una) &&
3041             before(shinfo->tskey, tcp_sk(sk)->snd_una)) {
3042                 tcp_skb_tsorted_save(skb) {
3043                         __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
3044                 } tcp_skb_tsorted_restore(skb);
3045         }
3046 }
3047 
3048 /* Remove acknowledged frames from the retransmission queue. If our packet
3049  * is before the ack sequence we can discard it as it's confirmed to have
3050  * arrived at the other end.
3051  */
3052 static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
3053                                u32 prior_snd_una,
3054                                struct tcp_sacktag_state *sack)
3055 {
3056         const struct inet_connection_sock *icsk = inet_csk(sk);
3057         u64 first_ackt, last_ackt;
3058         struct tcp_sock *tp = tcp_sk(sk);
3059         u32 prior_sacked = tp->sacked_out;
3060         u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */
3061         struct sk_buff *skb, *next;
3062         bool fully_acked = true;
3063         long sack_rtt_us = -1L;
3064         long seq_rtt_us = -1L;
3065         long ca_rtt_us = -1L;
3066         u32 pkts_acked = 0;
3067         u32 last_in_flight = 0;
3068         bool rtt_update;
3069         int flag = 0;
3070 
3071         first_ackt = 0;
3072 
3073         for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) {
3074                 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
3075                 const u32 start_seq = scb->seq;
3076                 u8 sacked = scb->sacked;
3077                 u32 acked_pcount;
3078 
3079                 tcp_ack_tstamp(sk, skb, prior_snd_una);
3080 
3081                 /* Determine how many packets and what bytes were acked, TSO or otherwise */
3082                 if (after(scb->end_seq, tp->snd_una)) {
3083                         if (tcp_skb_pcount(skb) == 1 ||
3084                             !after(tp->snd_una, scb->seq))
3085                                 break;
3086 
3087                         acked_pcount = tcp_tso_acked(sk, skb);
3088                         if (!acked_pcount)
3089                                 break;
3090                         fully_acked = false;
3091                 } else {
3092                         acked_pcount = tcp_skb_pcount(skb);
3093                 }
3094 
3095                 if (unlikely(sacked & TCPCB_RETRANS)) {
3096                         if (sacked & TCPCB_SACKED_RETRANS)
3097                                 tp->retrans_out -= acked_pcount;
3098                         flag |= FLAG_RETRANS_DATA_ACKED;
3099                 } else if (!(sacked & TCPCB_SACKED_ACKED)) {
3100                         last_ackt = skb->skb_mstamp;
3101                         WARN_ON_ONCE(last_ackt == 0);
3102                         if (!first_ackt)
3103                                 first_ackt = last_ackt;
3104 
3105                         last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
3106                         if (before(start_seq, reord))
3107                                 reord = start_seq;
3108                         if (!after(scb->end_seq, tp->high_seq))
3109                                 flag |= FLAG_ORIG_SACK_ACKED;
3110                 }
3111 
3112                 if (sacked & TCPCB_SACKED_ACKED) {
3113                         tp->sacked_out -= acked_pcount;
3114                 } else if (tcp_is_sack(tp)) {
3115                         tp->delivered += acked_pcount;
3116                         if (!tcp_skb_spurious_retrans(tp, skb))
3117                                 tcp_rack_advance(tp, sacked, scb->end_seq,
3118                                                  skb->skb_mstamp);
3119                 }
3120                 if (sacked & TCPCB_LOST)
3121                         tp->lost_out -= acked_pcount;
3122 
3123                 tp->packets_out -= acked_pcount;
3124                 pkts_acked += acked_pcount;
3125                 tcp_rate_skb_delivered(sk, skb, sack->rate);
3126 
3127                 /* Initial outgoing SYNs get put onto the write_queue
3128                  * just like anything else we transmit.  It is not
3129                  * true data, and if we misinform our callers that
3130                  * this ACK acks real data, we will erroneously exit
3131                  * connection startup slow start one packet too
3132                  * quickly.  This is severely frowned upon behavior.
3133                  */
3134                 if (likely(!(scb->tcp_flags & TCPHDR_SYN))) {
3135                         flag |= FLAG_DATA_ACKED;
3136                 } else {
3137                         flag |= FLAG_SYN_ACKED;
3138                         tp->retrans_stamp = 0;
3139                 }
3140 
3141                 if (!fully_acked)
3142                         break;
3143 
3144                 next = skb_rb_next(skb);
3145                 if (unlikely(skb == tp->retransmit_skb_hint))
3146                         tp->retransmit_skb_hint = NULL;
3147                 if (unlikely(skb == tp->lost_skb_hint))
3148                         tp->lost_skb_hint = NULL;
3149                 tcp_rtx_queue_unlink_and_free(skb, sk);
3150         }
3151 
3152         if (!skb)
3153                 tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
3154 
3155         if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
3156                 tp->snd_up = tp->snd_una;
3157 
3158         if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
3159                 flag |= FLAG_SACK_RENEGING;
3160 
3161         if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
3162                 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
3163                 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
3164 
3165                 if (pkts_acked == 1 && last_in_flight < tp->mss_cache &&
3166                     last_in_flight && !prior_sacked && fully_acked &&
3167                     sack->rate->prior_delivered + 1 == tp->delivered &&
3168                     !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
3169                         /* Conservatively mark a delayed ACK. It's typically
3170                          * from a lone runt packet over the round trip to
3171                          * a receiver w/o out-of-order or CE events.
3172                          */
3173                         flag |= FLAG_ACK_MAYBE_DELAYED;
3174                 }
3175         }
3176         if (sack->first_sackt) {
3177                 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
3178                 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
3179         }
3180         rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
3181                                         ca_rtt_us, sack->rate);
3182 
3183         if (flag & FLAG_ACKED) {
3184                 flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
3185                 if (unlikely(icsk->icsk_mtup.probe_size &&
3186                              !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
3187                         tcp_mtup_probe_success(sk);
3188                 }
3189 
3190                 if (tcp_is_reno(tp)) {
3191                         tcp_remove_reno_sacks(sk, pkts_acked);
3192 
3193                         /* If any of the cumulatively ACKed segments was
3194                          * retransmitted, the non-SACK case cannot confirm that
3195                          * progress was due to the original transmission, because
3196                          * TCPCB_SACKED_ACKED bits are missing, even if some of
3197                          * the packets may never have been retransmitted.
3198                          */
3199                         if (flag & FLAG_RETRANS_DATA_ACKED)
3200                                 flag &= ~FLAG_ORIG_SACK_ACKED;
3201                 } else {
3202                         int delta;
3203 
3204                         /* Non-retransmitted hole got filled? That's reordering */
3205                         if (before(reord, prior_fack))
3206                                 tcp_check_sack_reordering(sk, reord, 0);
3207 
3208                         delta = prior_sacked - tp->sacked_out;
3209                         tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
3210                 }
3211         } else if (skb && rtt_update && sack_rtt_us >= 0 &&
3212                    sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
3213                 /* Do not re-arm RTO if the sack RTT is measured from data sent
3214                  * after the head was last (re)transmitted. Otherwise the
3215                  * timeout may continue to extend in loss recovery.
3216                  */
3217                 flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
3218         }
3219 
3220         if (icsk->icsk_ca_ops->pkts_acked) {
3221                 struct ack_sample sample = { .pkts_acked = pkts_acked,
3222                                              .rtt_us = sack->rate->rtt_us,
3223                                              .in_flight = last_in_flight };
3224 
3225                 icsk->icsk_ca_ops->pkts_acked(sk, &sample);
3226         }
3227 
3228 #if FASTRETRANS_DEBUG > 0
3229         WARN_ON((int)tp->sacked_out < 0);
3230         WARN_ON((int)tp->lost_out < 0);
3231         WARN_ON((int)tp->retrans_out < 0);
3232         if (!tp->packets_out && tcp_is_sack(tp)) {
3233                 icsk = inet_csk(sk);
3234                 if (tp->lost_out) {
3235                         pr_debug("Leak l=%u %d\n",
3236                                  tp->lost_out, icsk->icsk_ca_state);
3237                         tp->lost_out = 0;
3238                 }
3239                 if (tp->sacked_out) {
3240                         pr_debug("Leak s=%u %d\n",
3241                                  tp->sacked_out, icsk->icsk_ca_state);
3242                         tp->sacked_out = 0;
3243                 }
3244                 if (tp->retrans_out) {
3245                         pr_debug("Leak r=%u %d\n",
3246                                  tp->retrans_out, icsk->icsk_ca_state);
3247                         tp->retrans_out = 0;
3248                 }
3249         }
3250 #endif
3251         return flag;
3252 }
3253 
3254 static void tcp_ack_probe(struct sock *sk)
3255 {
3256         struct inet_connection_sock *icsk = inet_csk(sk);
3257         struct sk_buff *head = tcp_send_head(sk);
3258         const struct tcp_sock *tp = tcp_sk(sk);
3259 
3260         /* Did a usable window open? */
3261         if (!head)
3262                 return;
3263         if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
3264                 icsk->icsk_backoff = 0;
3265                 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
3266                 /* The socket must be woken up by a subsequent tcp_data_snd_check().
3267                  * This function is not for general use!
3268                  */
3269         } else {
3270                 unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
3271 
3272                 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3273                                           when, TCP_RTO_MAX);
3274         }
3275 }
3276 
3277 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
3278 {
3279         return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3280                 inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
3281 }
3282 
3283 /* Decide whether to run the increase function of congestion control. */
3284 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
3285 {
3286         /* If reordering is high then always grow cwnd whenever data is
3287          * delivered regardless of its ordering. Otherwise stay conservative
3288          * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
3289          * new SACK or ECE mark may first advance cwnd here and later reduce
3290          * cwnd in tcp_fastretrans_alert() based on more states.
3291          */
3292         if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
3293                 return flag & FLAG_FORWARD_PROGRESS;
3294 
3295         return flag & FLAG_DATA_ACKED;
3296 }
3297 
3298 /* The "ultimate" congestion control function that aims to replace the rigid
3299  * cwnd increase and decrease control (tcp_cong_avoid,tcp_*cwnd_reduction).
3300  * It's called toward the end of processing an ACK with precise rate
3301  * information. All transmissions or retransmissions are deferred until afterwards.
3302  */
3303 static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
3304                              int flag, const struct rate_sample *rs)
3305 {
3306         const struct inet_connection_sock *icsk = inet_csk(sk);
3307 
3308         if (icsk->icsk_ca_ops->cong_control) {
3309                 icsk->icsk_ca_ops->cong_control(sk, rs);
3310                 return;
3311         }
3312 
3313         if (tcp_in_cwnd_reduction(sk)) {
3314                 /* Reduce cwnd if state mandates */
3315                 tcp_cwnd_reduction(sk, acked_sacked, flag);
3316         } else if (tcp_may_raise_cwnd(sk, flag)) {
3317                 /* Advance cwnd if state allows */
3318                 tcp_cong_avoid(sk, ack, acked_sacked);
3319         }
3320         tcp_update_pacing_rate(sk);
3321 }
3322 
3323 /* Check that window update is acceptable.
3324  * The function assumes that snd_una<=ack<=snd_next.
3325  */
3326 static inline bool tcp_may_update_window(const struct tcp_sock *tp,
3327                                         const u32 ack, const u32 ack_seq,
3328                                         const u32 nwin)
3329 {
3330         return  after(ack, tp->snd_una) ||
3331                 after(ack_seq, tp->snd_wl1) ||
3332                 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
3333 }
3334 
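/* [Editor's sketch -- not part of tcp_input.c] The before()/after() helpers
 * used in tcp_may_update_window() compare 32-bit sequence numbers modulo
 * 2^32 via the sign of the unsigned difference, so the test stays correct
 * across wraparound.  Minimal standalone restatement (illustrative names):
 */
static inline bool seq_after_sketch(unsigned int a, unsigned int b)
{
	return (int)(b - a) < 0;	/* a follows b modulo 2^32 */
}

static bool may_update_window_sketch(unsigned int snd_una, unsigned int snd_wl1,
				     unsigned int snd_wnd, unsigned int ack,
				     unsigned int ack_seq, unsigned int nwin)
{
	return seq_after_sketch(ack, snd_una) ||	/* ACK advances snd_una */
	       seq_after_sketch(ack_seq, snd_wl1) ||	/* newer segment than last update */
	       (ack_seq == snd_wl1 && nwin > snd_wnd);	/* same segment, larger window */
}
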
3335 /* If we update tp->snd_una, also update tp->bytes_acked */
3336 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
3337 {
3338         u32 delta = ack - tp->snd_una;
3339 
3340         sock_owned_by_me((struct sock *)tp);
3341         tp->bytes_acked += delta;
3342         tp->snd_una = ack;
3343 }
3344 
3345 /* If we update tp->rcv_nxt, also update tp->bytes_received */
3346 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
3347 {
3348         u32 delta = seq - tp->rcv_nxt;
3349 
3350         sock_owned_by_me((struct sock *)tp);
3351         tp->bytes_received += delta;
3352         tp->rcv_nxt = seq;
3353 }
3354 
3355 /* Update our send window.
3356  *
3357  * The window update algorithm described in RFC793/RFC1122 (used in linux-2.2
3358  * and in FreeBSD; NetBSD's is even worse) is wrong.
3359  */
3360 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
3361                                  u32 ack_seq)
3362 {
3363         struct tcp_sock *tp = tcp_sk(sk);
3364         int flag = 0;
3365         u32 nwin = ntohs(tcp_hdr(skb)->window);
3366 
3367         if (likely(!tcp_hdr(skb)->syn))
3368                 nwin <<= tp->rx_opt.snd_wscale;
3369 
3370         if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
3371                 flag |= FLAG_WIN_UPDATE;
3372                 tcp_update_wl(tp, ack_seq);
3373 
3374                 if (tp->snd_wnd != nwin) {
3375                         tp->snd_wnd = nwin;
3376 
3377                         /* Note: this is the only place where the
3378                          * fast path is re-enabled for the sending side of TCP.
3379                          */
3380                         tp->pred_flags = 0;
3381                         tcp_fast_path_check(sk);
3382 
3383                         if (!tcp_write_queue_empty(sk))
3384                                 tcp_slow_start_after_idle_check(sk);
3385 
3386                         if (nwin > tp->max_window) {
3387                                 tp->max_window = nwin;
3388                                 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
3389                         }
3390                 }
3391         }
3392 
3393         tcp_snd_una_update(tp, ack);
3394 
3395         return flag;
3396 }
3397 
3398 static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
3399                                    u32 *last_oow_ack_time)
3400 {
3401         if (*last_oow_ack_time) {
3402                 s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
3403 
3404                 if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
3405                         NET_INC_STATS(net, mib_idx);
3406                         return true;    /* rate-limited: don't send yet! */
3407                 }
3408         }
3409 
3410         *last_oow_ack_time = tcp_jiffies32;
3411 
3412         return false;   /* not rate-limited: go ahead, send dupack now! */
3413 }
3414 
3415 /* Return true if we're currently rate-limiting out-of-window ACKs and
3416  * thus shouldn't send a dupack right now. We rate-limit dupacks in
3417  * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
3418  * attacks that send repeated SYNs or ACKs for the same connection. To
3419  * do this, we do not send a duplicate SYNACK or ACK if the remote
3420  * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
3421  */
3422 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
3423                           int mib_idx, u32 *last_oow_ack_time)
3424 {
3425         /* Data packets without SYNs are not likely part of an ACK loop. */
3426         if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
3427             !tcp_hdr(skb)->syn)
3428                 return false;
3429 
3430         return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
3431 }
3432 
3433 /* RFC 5961 7 [ACK Throttling] */
3434 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
3435 {
3436         /* unprotected vars, we don't care about overwrites */
3437         static u32 challenge_timestamp;
3438         static unsigned int challenge_count;
3439         struct tcp_sock *tp = tcp_sk(sk);
3440         struct net *net = sock_net(sk);
3441         u32 count, now;
3442 
3443         /* First check our per-socket dupack rate limit. */
3444         if (__tcp_oow_rate_limited(net,
3445                                    LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
3446                                    &tp->last_oow_ack_time))
3447                 return;
3448 
3449         /* Then check host-wide RFC 5961 rate limit. */
3450         now = jiffies / HZ;
3451         if (now != challenge_timestamp) {
3452                 u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit;
3453                 u32 half = (ack_limit + 1) >> 1;
3454 
3455                 challenge_timestamp = now;
3456                 WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit));
3457         }
3458         count = READ_ONCE(challenge_count);
3459         if (count > 0) {
3460                 WRITE_ONCE(challenge_count, count - 1);
3461                 NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK);
3462                 tcp_send_ack(sk);
3463         }
3464 }
3465 
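/* [Editor's sketch -- not part of tcp_input.c] The host-wide RFC 5961 limit
 * above draws a fresh, randomized budget of challenge ACKs once per second
 * (randomized so an observer cannot infer the exact count), and each
 * challenge ACK consumes one unit.  Simplified restatement with illustrative
 * names; the caller supplies the current second and a random value, and
 * limit must be non-zero:
 */
struct challenge_budget_sketch {
	unsigned int epoch_sec;
	unsigned int count;
};

static bool challenge_ack_allowed_sketch(struct challenge_budget_sketch *b,
					 unsigned int now_sec,
					 unsigned int limit,
					 unsigned int random)
{
	if (now_sec != b->epoch_sec) {
		unsigned int half = (limit + 1) / 2;

		b->epoch_sec = now_sec;
		b->count = half + random % limit;	/* roughly limit/2 .. 3*limit/2 */
	}
	if (!b->count)
		return false;	/* budget exhausted: suppress the challenge ACK */
	b->count--;
	return true;
}
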
3466 static void tcp_store_ts_recent(struct tcp_sock *tp)
3467 {
3468         tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
3469         tp->rx_opt.ts_recent_stamp = get_seconds();
3470 }
3471 
3472 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
3473 {
3474         if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
3475                 /* PAWS bug workaround wrt. ACK frames: the extra PAWS
3476                  * discard check below makes sure this can only happen
3477                  * for pure ACK frames.  -DaveM
3478                  *
3479                  * Not only that, it also occurs for expired timestamps.
3480                  */
3481 
3482                 if (tcp_paws_check(&tp->rx_opt, 0))
3483                         tcp_store_ts_recent(tp);
3484         }
3485 }
3486 
3487 /* This routine deals with acks during a TLP episode.
3488  * We mark the end of a TLP episode on receiving TLP dupack or when
3489  * ack is after tlp_high_seq.
3490  * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
3491  */
3492 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
3493 {
3494         struct tcp_sock *tp = tcp_sk(sk);
3495 
3496         if (before(ack, tp->tlp_high_seq))
3497                 return;
3498 
3499         if (flag & FLAG_DSACKING_ACK) {
3500                 /* This DSACK means original and TLP probe arrived; no loss */
3501                 tp->tlp_high_seq = 0;
3502         } else if (after(ack, tp->tlp_high_seq)) {
3503                 /* ACK advances: there was a loss, so reduce cwnd. Reset
3504                  * tlp_high_seq in tcp_init_cwnd_reduction()
3505                  */
3506                 tcp_init_cwnd_reduction(sk);
3507                 tcp_set_ca_state(sk, TCP_CA_CWR);
3508                 tcp_end_cwnd_reduction(sk);
3509                 tcp_try_keep_open(sk);
3510                 NET_INC_STATS(sock_net(sk),
3511                                 LINUX_MIB_TCPLOSSPROBERECOVERY);
3512         } else if (!(flag & (FLAG_SND_UNA_ADVANCED |
3513                              FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
3514                 /* Pure dupack: original and TLP probe arrived; no loss */
3515                 tp->tlp_high_seq = 0;
3516         }
3517 }
3518 
3519 static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
3520 {
3521         const struct inet_connection_sock *icsk = inet_csk(sk);
3522 
3523         if (icsk->icsk_ca_ops->in_ack_event)
3524                 icsk->icsk_ca_ops->in_ack_event(sk, flags);
3525 }
3526 
3527 /* Congestion control has updated the cwnd already. So if we're in
3528  * loss recovery then now we do any new sends (for FRTO) or
3529  * retransmits (for CA_Loss or CA_Recovery) that make sense.
3530  */
3531 static void tcp_xmit_recovery(struct sock *sk, int rexmit)
3532 {
3533         struct tcp_sock *tp = tcp_sk(sk);
3534 
3535         if (rexmit == REXMIT_NONE)
3536                 return;
3537 
3538         if (unlikely(rexmit == 2)) {
3539                 __tcp_push_pending_frames(sk, tcp_current_mss(sk),
3540                                           TCP_NAGLE_OFF);
3541                 if (after(tp->snd_nxt, tp->high_seq))
3542                         return;
3543                 tp->frto = 0;
3544         }
3545         tcp_xmit_retransmit_queue(sk);
3546 }
3547 
3548 /* Returns the number of packets newly acked or sacked by the current ACK */
3549 static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag)
3550 {
3551         const struct net *net = sock_net(sk);
3552         struct tcp_sock *tp = tcp_sk(sk);
3553         u32 delivered;
3554 
3555         delivered = tp->delivered - prior_delivered;
3556         NET_ADD_STATS(net, LINUX_MIB_TCPDELIVERED, delivered);
3557         if (flag & FLAG_ECE) {
3558                 tp->delivered_ce += delivered;
3559                 NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, delivered);
3560         }
3561         return delivered;
3562 }
3563 
3564 /* This routine deals with incoming acks, but not outgoing ones. */
3565 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3566 {
3567         struct inet_connection_sock *icsk = inet_csk(sk);
3568         struct tcp_sock *tp = tcp_sk(sk);
3569         struct tcp_sacktag_state sack_state;
3570         struct rate_sample rs = { .prior_delivered = 0 };
3571         u32 prior_snd_una = tp->snd_una;
3572         bool is_sack_reneg = tp->is_sack_reneg;
3573         u32 ack_seq = TCP_SKB_CB(skb)->seq;
3574         u32 ack = TCP_SKB_CB(skb)->ack_seq;
3575         bool is_dupack = false;
3576         int prior_packets = tp->packets_out;
3577         u32 delivered = tp->delivered;
3578         u32 lost = tp->lost;
3579         int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
3580         u32 prior_fack;
3581 
3582         sack_state.first_sackt = 0;
3583         sack_state.rate = &rs;
3584 
3585         /* We very likely will need to access rtx queue. */
3586         prefetch(sk->tcp_rtx_queue.rb_node);
3587 
3588         /* If the ack is older than previous acks
3589          * then we can probably ignore it.
3590          */
3591         if (before(ack, prior_snd_una)) {
3592                 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
3593                 if (before(ack, prior_snd_una - tp->max_window)) {
3594                         if (!(flag & FLAG_NO_CHALLENGE_ACK))
3595                                 tcp_send_challenge_ack(sk, skb);
3596                         return -1;
3597                 }
3598                 goto old_ack;
3599         }
3600 
3601         /* If the ack includes data we haven't sent yet, discard
3602          * this segment (RFC793 Section 3.9).
3603          */
3604         if (after(ack, tp->snd_nxt))
3605                 goto invalid_ack;
3606 
3607         if (after(ack, prior_snd_una)) {
3608                 flag |= FLAG_SND_UNA_ADVANCED;
3609                 icsk->icsk_retransmits = 0;
3610 
3611 #if IS_ENABLED(CONFIG_TLS_DEVICE)
3612                 if (static_branch_unlikely(&clean_acked_data_enabled))
3613                         if (icsk->icsk_clean_acked)
3614                                 icsk->icsk_clean_acked(sk, ack);
3615 #endif
3616         }
3617 
3618         prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
3619         rs.prior_in_flight = tcp_packets_in_flight(tp);
3620 
3621         /* ts_recent update must be made after we are sure that the packet
3622          * is in window.
3623          */
3624         if (flag & FLAG_UPDATE_TS_RECENT)
3625                 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
3626 
3627         if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
3628                 /* Window is constant, pure forward advance.
3629                  * No more checks are required.
3630                  * Note, we use the fact that SND.UNA>=SND.WL2.
3631                  */
3632                 tcp_update_wl(tp, ack_seq);
3633                 tcp_snd_una_update(tp, ack);
3634                 flag |= FLAG_WIN_UPDATE;
3635 
3636                 tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
3637 
3638                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
3639         } else {
3640                 u32 ack_ev_flags = CA_ACK_SLOWPATH;
3641 
3642                 if (ack_seq != TCP_SKB_CB(skb)->end_seq)
3643                         flag |= FLAG_DATA;
3644                 else
3645                         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
3646 
3647                 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
3648 
3649                 if (TCP_SKB_CB(skb)->sacked)
3650                         flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
3651                                                         &sack_state);
3652 
3653                 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
3654                         flag |= FLAG_ECE;
3655                         ack_ev_flags |= CA_ACK_ECE;
3656                 }
3657 
3658                 if (flag & FLAG_WIN_UPDATE)
3659                         ack_ev_flags |= CA_ACK_WIN_UPDATE;
3660 
3661                 tcp_in_ack_event(sk, ack_ev_flags);
3662         }
3663 
3664         /* We passed data and got it acked, remove any soft error
3665          * log. Something worked...
3666          */
3667         sk->sk_err_soft = 0;
3668         icsk->icsk_probes_out = 0;
3669         tp->rcv_tstamp = tcp_jiffies32;
3670         if (!prior_packets)
3671                 goto no_queue;
3672 
3673         /* See if we can take anything off of the retransmit queue. */
3674         flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state);
3675 
3676         tcp_rack_update_reo_wnd(sk, &rs);
3677 
3678         if (tp->tlp_high_seq)
3679                 tcp_process_tlp_ack(sk, ack, flag);
3680         /* If needed, reset TLP/RTO timer; RACK may later override this. */
3681         if (flag & FLAG_SET_XMIT_TIMER)
3682                 tcp_set_xmit_timer(sk);
3683 
3684         if (tcp_ack_is_dubious(sk, flag)) {
3685                 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
3686                 tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
3687                                       &rexmit);
3688         }
3689 
3690         if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
3691                 sk_dst_confirm(sk);
3692 
3693         delivered = tcp_newly_delivered(sk, delivered, flag);
3694         lost = tp->lost - lost;                 /* freshly marked lost */
3695         rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
3696         tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
3697         tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
3698         tcp_xmit_recovery(sk, rexmit);
3699         return 1;
3700 
3701 no_queue:
3702         /* If data was DSACKed, see if we can undo a cwnd reduction. */
3703         if (flag & FLAG_DSACKING_ACK) {
3704                 tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
3705                                       &rexmit);
3706                 tcp_newly_delivered(sk, delivered, flag);
3707         }
3708         /* If this ack opens up a zero window, clear backoff.  It was
3709          * being used to time the probes, and is probably far higher than
3710          * it needs to be for normal retransmission.
3711          */
3712         tcp_ack_probe(sk);
3713 
3714         if (tp->tlp_high_seq)
3715                 tcp_process_tlp_ack(sk, ack, flag);
3716         return 1;
3717 
3718 invalid_ack:
3719         SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
3720         return -1;
3721 
3722 old_ack:
3723         /* If data was SACKed, tag it and see if we should send more data.
3724          * If data was DSACKed, see if we can undo a cwnd reduction.
3725          */
3726         if (TCP_SKB_CB(skb)->sacked) {
3727                 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
3728                                                 &sack_state);
3729                 tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
3730                                       &rexmit);
3731                 tcp_newly_delivered(sk, delivered, flag);
3732                 tcp_xmit_recovery(sk, rexmit);
3733         }
3734 
3735         SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
3736         return 0;
3737 }
3738 
3739 static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
3740                                       bool syn, struct tcp_fastopen_cookie *foc,
3741                                       bool exp_opt)
3742 {
3743         /* Valid only in SYN or SYN-ACK with an even length.  */
3744         if (!foc || !syn || len < 0 || (len & 1))
3745                 return;
3746 
3747         if (len >= TCP_FASTOPEN_COOKIE_MIN &&
3748             len <= TCP_FASTOPEN_COOKIE_MAX)
3749                 memcpy(foc->val, cookie, len);
3750         else if (len != 0)
3751                 len = -1;
3752         foc->len = len;
3753         foc->exp = exp_opt;
3754 }
3755 
3756 static void smc_parse_options(const struct tcphdr *th,
3757                               struct tcp_options_received *opt_rx,
3758                               const unsigned char *ptr,
3759                               int opsize)
3760 {
3761 #if IS_ENABLED(CONFIG_SMC)
3762         if (static_branch_unlikely(&tcp_have_smc)) {
3763                 if (th->syn && !(opsize & 1) &&
3764                     opsize >= TCPOLEN_EXP_SMC_BASE &&
3765                     get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC)
3766                         opt_rx->smc_ok = 1;
3767         }
3768 #endif
3769 }
3770 
3771 /* Look for tcp options. Normally only called on SYN and SYNACK packets.
3772  * But, this can also be called on packets in the established flow when
3773  * the fast version below fails.
3774  */
3775 void tcp_parse_options(const struct net *net,
3776                        const struct sk_buff *skb,
3777                        struct tcp_options_received *opt_rx, int estab,
3778                        struct tcp_fastopen_cookie *foc)
3779 {
3780         const unsigned char *ptr;
3781         const struct tcphdr *th = tcp_hdr(skb);
3782         int length = (th->doff * 4) - sizeof(struct tcphdr);
3783 
3784         ptr = (const unsigned char *)(th + 1);
3785         opt_rx->saw_tstamp = 0;
3786 
3787         while (length > 0) {
3788                 int opcode = *ptr++;
3789                 int opsize;
3790 
3791                 switch (opcode) {
3792                 case TCPOPT_EOL:
3793                         return;
3794                 case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
3795                         length--;
3796                         continue;
3797                 default:
3798                         opsize = *ptr++;
3799                         if (opsize < 2) /* "silly options" */
3800                                 return;
3801                         if (opsize > length)
3802                                 return; /* don't parse partial options */
3803                         switch (opcode) {
3804                         case TCPOPT_MSS:
3805                                 if (opsize == TCPOLEN_MSS && th->syn && !estab) {
3806                                         u16 in_mss = get_unaligned_be16(ptr);
3807                                         if (in_mss) {
3808                                                 if (opt_rx->user_mss &&
3809                                                     opt_rx->user_mss < in_mss)
3810                                                         in_mss = opt_rx->user_mss;
3811                                                 opt_rx->mss_clamp = in_mss;
3812                                         }
3813                                 }
3814                                 break;
3815                         case TCPOPT_WINDOW:
3816                                 if (opsize == TCPOLEN_WINDOW && th->syn &&
3817                                     !estab && net->ipv4.sysctl_tcp_window_scaling) {
3818                                         __u8 snd_wscale = *(__u8 *)ptr;
3819                                         opt_rx->wscale_ok = 1;
3820                                         if (snd_wscale > TCP_MAX_WSCALE) {
3821                                                 net_info_ratelimited("%s: Illegal window scaling value %d > %u received\n",
3822                                                                      __func__,
3823                                                                      snd_wscale,
3824                                                                      TCP_MAX_WSCALE);
3825                                                 snd_wscale = TCP_MAX_WSCALE;
3826                                         }
3827                                         opt_rx->snd_wscale = snd_wscale;
3828                                 }
3829                                 break;
3830                         case TCPOPT_TIMESTAMP:
3831                                 if ((opsize == TCPOLEN_TIMESTAMP) &&
3832                                     ((estab && opt_rx->tstamp_ok) ||
3833                                      (!estab && net->ipv4.sysctl_tcp_timestamps))) {
3834                                         opt_rx->saw_tstamp = 1;
3835                                         opt_rx->rcv_tsval = get_unaligned_be32(ptr);
3836                                         opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
3837                                 }
3838                                 break;
3839                         case TCPOPT_SACK_PERM:
3840                                 if (opsize == TCPOLEN_SACK_PERM && th->syn &&
3841                                     !estab && net->ipv4.sysctl_tcp_sack) {
3842                                         opt_rx->sack_ok = TCP_SACK_SEEN;
3843                                         tcp_sack_reset(opt_rx);
3844                                 }
3845                                 break;
3846 
3847                         case TCPOPT_SACK:
3848                                 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
3849                                    !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
3850                                    opt_rx->sack_ok) {
3851                                         TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
3852                                 }
3853                                 break;
3854 #ifdef CONFIG_TCP_MD5SIG
3855                         case TCPOPT_MD5SIG:
3856                                 /*
3857                                  * The MD5 Hash has already been
3858                                  * checked (see tcp_v{4,6}_do_rcv()).
3859                                  */
3860                                 break;
3861 #endif
3862                         case TCPOPT_FASTOPEN:
3863                                 tcp_parse_fastopen_option(
3864                                         opsize - TCPOLEN_FASTOPEN_BASE,
3865                                         ptr, th->syn, foc, false);
3866                                 break;
3867 
3868                         case TCPOPT_EXP:
3869                                 /* Fast Open option shares code 254 using a
3870                                  * 16 bits magic number.
3871                          * 16-bit magic number.
3872                                 if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
3873                                     get_unaligned_be16(ptr) ==
3874                                     TCPOPT_FASTOPEN_MAGIC)
3875                                         tcp_parse_fastopen_option(opsize -
3876                                                 TCPOLEN_EXP_FASTOPEN_BASE,
3877                                                 ptr + 2, th->syn, foc, true);
3878                                 else
3879                                         smc_parse_options(th, opt_rx, ptr,
3880                                                           opsize);
3881                                 break;
3882 
3883                         }
3884                         ptr += opsize-2;
3885                         length -= opsize;
3886                 }
3887         }
3888 }
3889 EXPORT_SYMBOL(tcp_parse_options);
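A minimal user-space sketch of the same kind/length option walk (not part of
tcp_input.c): it shows why the "opsize < 2" and "opsize > length" guards above
are needed to avoid looping forever or reading past the header. The names
walk_options and OPT_* are mine; only the option codes (0 = EOL, 1 = NOP,
2 = MSS) are the standard values.

#include <stdint.h>
#include <stdio.h>

#define OPT_EOL 0
#define OPT_NOP 1
#define OPT_MSS 2

static void walk_options(const uint8_t *ptr, int length)
{
        while (length > 0) {
                int opcode = *ptr++;
                int opsize;

                if (opcode == OPT_EOL)
                        return;
                if (opcode == OPT_NOP) {        /* single byte, no length field */
                        length--;
                        continue;
                }
                opsize = *ptr++;
                if (opsize < 2 || opsize > length)
                        return;                 /* malformed or truncated option */
                if (opcode == OPT_MSS && opsize == 4)
                        printf("MSS = %u\n", (unsigned int)((ptr[0] << 8) | ptr[1]));
                ptr += opsize - 2;              /* skip the value bytes */
                length -= opsize;
        }
}

int main(void)
{
        /* MSS 1460, NOP, NOP, EOL - a typical SYN option block */
        const uint8_t opts[] = { 2, 4, 0x05, 0xb4, 1, 1, 0 };

        walk_options(opts, (int)sizeof(opts));
        return 0;
}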
3890 
3891 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
3892 {
3893         const __be32 *ptr = (const __be32 *)(th + 1);
3894 
3895         if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3896                           | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
3897                 tp->rx_opt.saw_tstamp = 1;
3898                 ++ptr;
3899                 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3900                 ++ptr;
3901                 if (*ptr)
3902                         tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
3903                 else
3904                         tp->rx_opt.rcv_tsecr = 0;
3905                 return true;
3906         }
3907         return false;
3908 }
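For reference, a stand-alone sketch (not kernel code; be32 and parse_ts_fast
are made-up helpers) of the pattern the fast path above matches: the first
32-bit word of the option area must be exactly NOP, NOP, TIMESTAMP, length 10
(0x0101080a on the wire), after which TSval and TSecr follow as two big-endian
32-bit values.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Read a 32-bit big-endian value from a byte buffer. */
static uint32_t be32(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
}

/* Recognize the common 12-byte "NOP NOP TIMESTAMP(len 10)" block and
 * pull out TSval/TSecr, mirroring the aligned fast path above. */
static bool parse_ts_fast(const uint8_t *opt, uint32_t *tsval, uint32_t *tsecr)
{
        if (be32(opt) != 0x0101080au)   /* bytes 1, 1, 8, 10 */
                return false;
        *tsval = be32(opt + 4);
        *tsecr = be32(opt + 8);
        return true;
}

int main(void)
{
        const uint8_t opt[12] = { 1, 1, 8, 10,
                                  0, 0, 0, 100,  /* TSval = 100 */
                                  0, 0, 0, 42 }; /* TSecr = 42  */
        uint32_t v, e;

        if (parse_ts_fast(opt, &v, &e))
                printf("TSval=%u TSecr=%u\n", v, e);
        return 0;
}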
3909 
3910 /* Fast parse options. This hopes to only see timestamps.
3911  * If it is wrong it falls back on tcp_parse_options().
3912  */
3913 static bool tcp_fast_parse_options(const struct net *net,
3914                                    const struct sk_buff *skb,
3915                                    const struct tcphdr *th, struct tcp_sock *tp)
3916 {
3917         /* In the spirit of fast parsing, compare doff directly to constant
3918          * values.  Because equality is used, short doff can be ignored here.
3919          */
3920         if (th->doff == (sizeof(*th) / 4)) {
3921                 tp->rx_opt.saw_tstamp = 0;
3922                 return false;
3923         } else if (tp->rx_opt.tstamp_ok &&
3924                    th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
3925                 if (tcp_parse_aligned_timestamp(tp, th))
3926                         return true;
3927         }
3928 
3929         tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL);
3930         if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
3931                 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
3932 
3933         return true;
3934 }
3935 
3936 #ifdef CONFIG_TCP_MD5SIG
3937 /*
3938  * Parse MD5 Signature option
3939  */
3940 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
3941 {
3942         int length = (th->doff << 2) - sizeof(*th);
3943         const u8 *ptr = (const u8 *)(th + 1);
3944 
3945         /* If not enough data remaining, we can short cut */
3946         while (length >= TCPOLEN_MD5SIG) {
3947                 int opcode = *ptr++;
3948                 int opsize;
3949 
3950                 switch (opcode) {
3951                 case TCPOPT_EOL:
3952                         return NULL;
3953                 case TCPOPT_NOP:
3954                         length--;
3955                         continue;
3956                 default:
3957                         opsize = *ptr++;
3958                         if (opsize < 2 || opsize > length)
3959                                 return NULL;
3960                         if (opcode == TCPOPT_MD5SIG)
3961                                 return opsize == TCPOLEN_MD5SIG ? ptr : NULL;
3962                 }
3963                 ptr += opsize - 2;
3964                 length -= opsize;
3965         }
3966         return NULL;
3967 }
3968 EXPORT_SYMBOL(tcp_parse_md5sig_option);
3969 #endif
3970 
3971 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
3972  *
3973  * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
3974  * it can pass through stack. So, the following predicate verifies that
3975  * this segment is not used for anything but congestion avoidance or
3976  * fast retransmit. Moreover, we even are able to eliminate most of such
3977  * second order effects, if we apply some small "replay" window (~RTO)
3978  * to timestamp space.
3979  *
3980  * All these measures still do not guarantee that we reject wrapped ACKs
3981  * on networks with high bandwidth, when sequence space is recycled quickly,
3982  * but it guarantees that such events will be very rare and do not affect
3983  * the connection seriously. This doesn't look nice, but alas, PAWS is a really
3984  * buggy extension.
3985  *
3986  * [ Later note. Even worse! It is buggy for segments _with_ data. RFC
3987  * states that events where a retransmit arrives after the original data are rare.
3988  * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
3989  * the biggest problem on large power networks even with minor reordering.
3990  * OK, let's give it a small replay window. If the peer clock is even 1 Hz, it is
3991  * safe up to a bandwidth of 18 Gbit/sec. 8) ]
3992  */
3993 
3994 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
3995 {
3996         const struct tcp_sock *tp = tcp_sk(sk);
3997         const struct tcphdr *th = tcp_hdr(skb);
3998         u32 seq = TCP_SKB_CB(skb)->seq;
3999         u32 ack = TCP_SKB_CB(skb)->ack_seq;
4000 
4001         return (/* 1. Pure ACK with correct sequence number. */
4002                 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
4003 
4004                 /* 2. ... and duplicate ACK. */
4005                 ack == tp->snd_una &&
4006 
4007                 /* 3. ... and does not update window. */
4008                 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
4009 
4010                 /* 4. ... and sits in replay window. */
4011                 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
4012 }
4013 
4014 static inline bool tcp_paws_discard(const struct sock *sk,
4015                                    const struct sk_buff *skb)
4016 {
4017         const struct tcp_sock *tp = tcp_sk(sk);
4018 
4019         return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
4020                !tcp_disordered_ack(sk, skb);
4021 }
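A stand-alone sketch of the timestamp comparison underlying the PAWS checks
above, assuming only that timestamps are 32-bit values compared with signed
arithmetic; ts_older_than is a made-up helper, not a kernel function. Taking
the difference as a signed 32-bit value keeps the test correct across
timestamp wrap-around, which is why a small replay window can be tolerated.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* True if rcv_tsval is more than 'window' ticks older than ts_recent. */
static bool ts_older_than(uint32_t ts_recent, uint32_t rcv_tsval, int32_t window)
{
        return (int32_t)(ts_recent - rcv_tsval) > window;
}

int main(void)
{
        /* ts_recent just wrapped past zero; a TSval from before the wrap
         * is still correctly detected as older. */
        printf("%d\n", ts_older_than(5, 0xFFFFFFF0u, 1));      /* 1: too old */
        printf("%d\n", ts_older_than(5, 4, 1));                /* 0: fresh   */
        return 0;
}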
4022 
4023 /* Check segment sequence number for validity.
4024  *
4025  * Segment controls are considered valid, if the segment
4026  * fits to the window after truncation to the window. Acceptability
4027  * of data (and SYN, FIN, of course) is checked separately.
4028  * See tcp_data_queue(), for example.
4029  *
4030  * Also, controls (RST is the main one) are accepted using RCV.WUP instead
4031  * of RCV.NXT. The peer may not have advanced his SND.UNA yet when we
4032  * delayed the ACK, so that his SND.UNA <= our RCV.WUP.
4033  * (borrowed from FreeBSD)
4034  */
4035 
4036 static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
4037 {
4038         return  !before(end_seq, tp->rcv_wup) &&
4039                 !after(seq, tp->rcv_nxt + tcp_receive_window(tp));
4040 }
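The same acceptability test written as a stand-alone sketch (seq_before and
seq_after are local stand-ins for the kernel's before()/after() helpers): a
segment is acceptable if it does not end before RCV.WUP and does not start
beyond RCV.NXT plus the receive window, with all comparisons taken modulo 2^32.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

static bool seq_acceptable(uint32_t seq, uint32_t end_seq,
                           uint32_t rcv_wup, uint32_t rcv_nxt, uint32_t rcv_wnd)
{
        return !seq_before(end_seq, rcv_wup) &&
               !seq_after(seq, rcv_nxt + rcv_wnd);
}

int main(void)
{
        /* In-window segment straddling a sequence-number wrap. */
        printf("%d\n", seq_acceptable(0xFFFFFFF0u, 0x10u,
                                      0xFFFFFFF0u, 0xFFFFFFF0u, 0x1000));  /* 1 */
        /* Entirely below the window (already acked and forgotten). */
        printf("%d\n", seq_acceptable(100, 200, 300, 300, 0x1000));        /* 0 */
        return 0;
}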
4041 
4042 /* When we get a reset we do this. */
4043 void tcp_reset(struct sock *sk)
4044 {
4045         trace_tcp_receive_reset(sk);
4046 
4047         /* We want the right error as BSD sees it (and indeed as we do). */
4048         switch (sk->sk_state) {
4049         case TCP_SYN_SENT:
4050                 sk->sk_err = ECONNREFUSED;
4051                 break;
4052         case TCP_CLOSE_WAIT:
4053                 sk->sk_err = EPIPE;
4054                 break;
4055         case TCP_CLOSE:
4056                 return;
4057         default:
4058                 sk->sk_err = ECONNRESET;
4059         }
4060         /* This barrier is coupled with smp_rmb() in tcp_poll() */
4061         smp_wmb();
4062 
4063         tcp_write_queue_purge(sk);
4064         tcp_done(sk);
4065 
4066         if (!sock_flag(sk, SOCK_DEAD))
4067                 sk->sk_error_report(sk);
4068 }
4069 
4070 /*
4071  *      Process the FIN bit. This now behaves as it is supposed to work
4072  *      and the FIN takes effect when it is validly part of sequence
4073  *      space. Not before when we get holes.
4074  *
4075  *      If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
4076  *      (and thence onto LAST-ACK and finally, CLOSE, we never enter
4077  *      TIME-WAIT)
4078  *
4079  *      If we are in FINWAIT-1, a received FIN indicates simultaneous
4080  *      close and we go into CLOSING (and later onto TIME-WAIT)
4081  *
4082  *      If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
4083  */
4084 void tcp_fin(struct sock *sk)
4085 {
4086         struct tcp_sock *tp = tcp_sk(sk);
4087 
4088         inet_csk_schedule_ack(sk);
4089 
4090         sk->sk_shutdown |= RCV_SHUTDOWN;
4091         sock_set_flag(sk, SOCK_DONE);
4092 
4093         switch (sk->sk_state) {
4094         case TCP_SYN_RECV:
4095         case TCP_ESTABLISHED:
4096                 /* Move to CLOSE_WAIT */
4097                 tcp_set_state(sk, TCP_CLOSE_WAIT);
4098                 inet_csk(sk)->icsk_ack.pingpong = 1;
4099                 break;
4100 
4101         case TCP_CLOSE_WAIT:
4102         case TCP_CLOSING:
4103                 /* Received a retransmission of the FIN, do
4104                  * nothing.
4105                  */
4106                 break;
4107         case TCP_LAST_ACK:
4108                 /* RFC793: Remain in the LAST-ACK state. */
4109                 break;
4110 
4111         case TCP_FIN_WAIT1:
4112                 /* This case occurs when a simultaneous close
4113                  * happens, we must ack the received FIN and
4114                  * enter the CLOSING state.
4115                  */
4116                 tcp_send_ack(sk);
4117                 tcp_set_state(sk, TCP_CLOSING);
4118                 break;
4119         case TCP_FIN_WAIT2:
4120                 /* Received a FIN -- send ACK and enter TIME_WAIT. */
4121                 tcp_send_ack(sk);
4122                 tcp_time_wait(sk, TCP_TIME_WAIT, 0);
4123                 break;
4124         default:
4125                 /* Only TCP_LISTEN and TCP_CLOSE are left, in these
4126                  * cases we should never reach this piece of code.
4127                  */
4128                 pr_err("%s: Impossible, sk->sk_state=%d\n",
4129                        __func__, sk->sk_state);
4130                 break;
4131         }
4132 
4133         /* It _is_ possible, that we have something out-of-order _after_ FIN.
4134          * Probably, we should reset in this case. For now drop them.
4135          */
4136         skb_rbtree_purge(&tp->out_of_order_queue);
4137         if (tcp_is_sack(tp))
4138                 tcp_sack_reset(&tp->rx_opt);
4139         sk_mem_reclaim(sk);
4140 
4141         if (!sock_flag(sk, SOCK_DEAD)) {
4142                 sk->sk_state_change(sk);
4143 
4144                 /* Do not send POLL_HUP for half duplex close. */
4145                 if (sk->sk_shutdown == SHUTDOWN_MASK ||
4146                     sk->sk_state == TCP_CLOSE)
4147                         sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
4148                 else
4149                         sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
4150         }
4151 }
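The state transitions described in the comment above, reduced to a stand-alone
table-style sketch; the enum names are local placeholders, not the kernel's
TCP_* state constants.

#include <stdio.h>

enum st { ESTABLISHED, FIN_WAIT1, FIN_WAIT2, CLOSE_WAIT, CLOSING, TIME_WAIT };

static enum st on_fin(enum st s)
{
        switch (s) {
        case ESTABLISHED:       return CLOSE_WAIT;      /* passive close       */
        case FIN_WAIT1:         return CLOSING;         /* simultaneous close  */
        case FIN_WAIT2:         return TIME_WAIT;       /* normal active close */
        default:                return s;               /* retransmitted FIN   */
        }
}

int main(void)
{
        printf("%d %d %d\n", on_fin(ESTABLISHED), on_fin(FIN_WAIT1),
               on_fin(FIN_WAIT2));
        return 0;
}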
4152 
4153 static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4154                                   u32 end_seq)
4155 {
4156         if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
4157                 if (before(seq, sp->start_seq))
4158                         sp->start_seq = seq;
4159                 if (after(end_seq, sp->end_seq))
4160                         sp->end_seq = end_seq;
4161                 return true;
4162         }
4163         return false;
4164 }
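A stand-alone sketch of the same rule on a plain start/end pair (struct range,
range_extend and the helpers sb/sa are made-up names): two ranges are merged
only if they touch or overlap, and the result is the union of the two.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct range { uint32_t start, end; };

static bool sb(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool sa(uint32_t a, uint32_t b) { return sb(b, a); }

static bool range_extend(struct range *r, uint32_t seq, uint32_t end_seq)
{
        if (!sa(seq, r->end) && !sa(r->start, end_seq)) {
                if (sb(seq, r->start))
                        r->start = seq;
                if (sa(end_seq, r->end))
                        r->end = end_seq;
                return true;
        }
        return false;
}

int main(void)
{
        struct range r = { 1000, 2000 };
        bool hit;

        hit = range_extend(&r, 1500, 2500);
        printf("%d [%u,%u]\n", hit, r.start, r.end);    /* 1 [1000,2500] */
        hit = range_extend(&r, 4000, 5000);
        printf("%d [%u,%u]\n", hit, r.start, r.end);    /* 0 [1000,2500] */
        return 0;
}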
4165 
4166 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
4167 {
4168         struct tcp_sock *tp = tcp_sk(sk);
4169 
4170         if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
4171                 int mib_idx;
4172 
4173                 if (before(seq, tp->rcv_nxt))
4174                         mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
4175                 else
4176                         mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
4177 
4178                 NET_INC_STATS(sock_net(sk), mib_idx);
4179 
4180                 tp->rx_opt.dsack = 1;
4181                 tp->duplicate_sack[0].start_seq = seq;
4182                 tp->duplicate_sack[0].end_seq = end_seq;
4183         }
4184 }
4185 
4186 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
4187 {
4188         struct tcp_sock *tp = tcp_sk(sk);
4189 
4190         if (!tp->rx_opt.dsack)
4191                 tcp_dsack_set(sk, seq, end_seq);
4192         else
4193                 tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
4194 }
4195 
4196 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
4197 {
4198         struct tcp_sock *tp = tcp_sk(sk);
4199 
4200         if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
4201             before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4202                 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4203                 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
4204 
4205                 if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
4206                         u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4207 
4208                         if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
4209                                 end_seq = tp->rcv_nxt;
4210                         tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
4211                 }
4212         }
4213 
4214         tcp_send_ack(sk);
4215 }
4216 
4217 /* These routines update the SACK block as out-of-order packets arrive or
4218  * in-order packets close up the sequence space.
4219  */
4220 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
4221 {
4222         int this_sack;
4223         struct tcp_sack_block *sp = &tp->selective_acks[0];
4224         struct tcp_sack_block *swalk = sp + 1;
4225 
4226         /* See if the recent change to the first SACK eats into
4227          * or hits the sequence space of other SACK blocks; if so, coalesce.
4228          */
4229         for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
4230                 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
4231                         int i;
4232 
4233                         /* Zap SWALK, by moving every further SACK up by one slot.
4234                          * Decrease num_sacks.
4235                          */
4236                         tp->rx_opt.num_sacks--;
4237                         for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
4238                                 sp[i] = sp[i + 1];
4239                         continue;
4240                 }
4241                 this_sack++, swalk++;
4242         }
4243 }
4244 
4245 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
4246 {
4247         struct tcp_sock *tp = tcp_sk(sk);
4248         struct tcp_sack_block *sp = &tp->selective_acks[0];
4249         int cur_sacks = tp->rx_opt.num_sacks;
4250         int this_sack;
4251 
4252         if (!cur_sacks)
4253                 goto new_sack;
4254 
4255         for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
4256                 if (tcp_sack_extend(sp, seq, end_seq)) {
4257                         /* Rotate this_sack to the first one. */
4258                         for (; this_sack > 0; this_sack--, sp--)
4259                                 swap(*sp, *(sp - 1));
4260                         if (cur_sacks > 1)
4261                                 tcp_sack_maybe_coalesce(tp);
4262                         return;
4263                 }
4264         }
4265 
4266         /* Could not find an adjacent existing SACK, build a new one,
4267          * put it at the front, and shift everyone else down.  We
4268          * always know there is at least one SACK present already here.
4269          *
4270          * If the sack array is full, forget about the last one.
4271          */
4272         if (this_sack >= TCP_NUM_SACKS) {
4273                 if (tp->compressed_ack)
4274                         tcp_send_ack(sk);
4275                 this_sack--;
4276                 tp->rx_opt.num_sacks--;
4277                 sp--;
4278         }
4279         for (; this_sack > 0; this_sack--, sp--)
4280                 *sp = *(sp - 1);
4281 
4282 new_sack:
4283         /* Build the new head SACK, and we're done. */
4284         sp->start_seq = seq;
4285         sp->end_seq = end_seq;
4286         tp->rx_opt.num_sacks++;
4287 }
4288 
4289 /* RCV.NXT advances, some SACKs should be eaten. */
4290 
4291 static void tcp_sack_remove(struct tcp_sock *tp)
4292 {
4293         struct tcp_sack_block *sp = &tp->selective_acks[0];
4294         int num_sacks = tp->rx_opt.num_sacks;
4295         int this_sack;
4296 
4297         /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
4298         if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
4299                 tp->rx_opt.num_sacks = 0;
4300                 return;
4301         }
4302 
4303         for (this_sack = 0; this_sack < num_sacks;) {
4304                 /* Check if the start of the sack is covered by RCV.NXT. */
4305                 if (!before(tp->rcv_nxt, sp->start_seq)) {
4306                         int i;
4307 
4308                         /* RCV.NXT must cover all the block! */
4309                         WARN_ON(before(tp->rcv_nxt, sp->end_seq));
4310 
4311                         /* Zap this SACK, by moving forward any other SACKS. */
4312                         for (i = this_sack+1; i < num_sacks; i++)
4313                                 tp->selective_acks[i-1] = tp->selective_acks[i];
4314                         num_sacks--;
4315                         continue;
4316                 }
4317                 this_sack++;
4318                 sp++;
4319         }
4320         tp->rx_opt.num_sacks = num_sacks;
4321 }
4322 
4323 /**
4324  * tcp_try_coalesce - try to merge skb to prior one
4325  * @sk: socket
4326  * @dest: destination queue
4327  * @to: prior buffer
4328  * @from: buffer to add in queue
4329  * @fragstolen: pointer to boolean
4330  *
4331  * Before queueing skb @from after @to, try to merge them
4332  * to reduce overall memory use and queue lengths, if cost is small.
4333  * Packets in ofo or receive queues can stay a long time.
4334  * Better try to coalesce them right now to avoid future collapses.
4335  * Returns true if caller should free @from instead of queueing it
4336  */
4337 static bool tcp_try_coalesce(struct sock *sk,
4338                              struct sk_buff *to,
4339                              struct sk_buff *from,
4340                              bool *fragstolen)
4341 {
4342         int delta;
4343 
4344         *fragstolen = false;
4345 
4346         /* It's possible this segment overlaps with a prior segment in the queue */
4347         if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
4348                 return false;
4349 
4350         if (!skb_try_coalesce(to, from, fragstolen, &delta))
4351                 return false;
4352 
4353         atomic_add(delta, &sk->sk_rmem_alloc);
4354         sk_mem_charge(sk, delta);
4355         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
4356         TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
4357         TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
4358         TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
4359 
4360         if (TCP_SKB_CB(from)->has_rxtstamp) {
4361                 TCP_SKB_CB(to)->has_rxtstamp = true;
4362                 to->tstamp = from->tstamp;
4363         }
4364 
4365         return true;
4366 }
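A reduced sketch of the coalescing precondition (struct seg and try_coalesce
are illustrative names): merging is only attempted when the new segment starts
exactly where the previous one ends, so the merged byte range stays contiguous.
The real work above (skb page merging via skb_try_coalesce(), memory
accounting, flag propagation) is left out of this sketch.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct seg { uint32_t seq, end_seq; };

static bool try_coalesce(struct seg *to, const struct seg *from)
{
        if (from->seq != to->end_seq)
                return false;                   /* gap or overlap: keep separate */
        to->end_seq = from->end_seq;            /* merged range stays contiguous */
        return true;
}

int main(void)
{
        struct seg to = { 100, 200 }, next = { 200, 300 }, gap = { 400, 500 };
        bool ok;

        ok = try_coalesce(&to, &next);
        printf("%d end=%u\n", ok, to.end_seq);  /* 1 end=300 */
        ok = try_coalesce(&to, &gap);
        printf("%d end=%u\n", ok, to.end_seq);  /* 0 end=300 */
        return 0;
}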
4367 
4368 static bool tcp_ooo_try_coalesce(struct sock *sk,
4369                              struct sk_buff *to,
4370                              struct sk_buff *from,
4371                              bool *fragstolen)
4372 {
4373         bool res = tcp_try_coalesce(sk, to, from, fragstolen);
4374 
4375         /* In case tcp_drop() is called later, update to->gso_segs */
4376         if (res) {
4377                 u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
4378                                max_t(u16, 1, skb_shinfo(from)->gso_segs);
4379 
4380                 skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
4381         }
4382         return res;
4383 }
4384 
4385 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
4386 {
4387         sk_drops_add(sk, skb);
4388         __kfree_skb(skb);
4389 }
4390 
4391 /* This one checks to see if we can put data from the
4392  * out_of_order queue into the receive_queue.
4393  */
4394 static void tcp_ofo_queue(struct sock *sk)
4395 {
4396         struct tcp_sock *tp = tcp_sk(sk);
4397         __u32 dsack_high = tp->rcv_nxt;
4398         bool fin, fragstolen, eaten;
4399         struct sk_buff *skb, *tail;
4400         struct rb_node *p;
4401 
4402         p = rb_first(&tp->out_of_order_queue);
4403         while (p) {
4404                 skb = rb_to_skb(p);
4405                 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
4406                         break;
4407 
4408                 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
4409                         __u32 dsack = dsack_high;
4410                         if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
4411                                 dsack_high = TCP_SKB_CB(skb)->end_seq;
4412                         tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
4413                 }
4414                 p = rb_next(p);
4415                 rb_erase(&skb->rbnode, &tp->out_of_order_queue);
4416 
4417                 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
4418                         SOCK_DEBUG(sk, "ofo packet was already received\n");
4419                         tcp_drop(sk, skb);
4420                         continue;
4421                 }
4422                 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
4423                            tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
4424                            TCP_SKB_CB(skb)->end_seq);
4425 
4426                 tail = skb_peek_tail(&sk->sk_receive_queue);
4427                 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
4428                 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
4429                 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
4430                 if (!eaten)
4431                         __skb_queue_tail(&sk->sk_receive_queue, skb);
4432                 else
4433                         kfree_skb_partial(skb, fragstolen);
4434 
4435                 if (unlikely(fin)) {
4436                         tcp_fin(sk);
4437                         /* tcp_fin() purges tp->out_of_order_queue,
4438                          * so we must end this loop right now.
4439                          */
4440                         break;
4441                 }
4442         }
4443 }
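The drain loop above, reduced to a stand-alone sketch over a sorted array
(all names here are mine): once the gap is filled, every queued segment whose
start is at or below rcv_nxt is delivered and rcv_nxt is advanced past it; the
first segment that still leaves a hole stops the loop.

#include <stdint.h>
#include <stdio.h>

struct seg { uint32_t seq, end_seq; };

static int sle(uint32_t a, uint32_t b) { return (int32_t)(a - b) <= 0; }
static int slt(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

int main(void)
{
        struct seg ofo[] = { { 100, 200 }, { 200, 300 }, { 400, 500 } };
        uint32_t rcv_nxt = 100;
        unsigned int i;

        for (i = 0; i < sizeof(ofo) / sizeof(ofo[0]); i++) {
                if (!sle(ofo[i].seq, rcv_nxt))
                        break;                          /* still a hole before it */
                if (slt(rcv_nxt, ofo[i].end_seq))
                        rcv_nxt = ofo[i].end_seq;       /* deliver and advance    */
        }
        printf("rcv_nxt=%u\n", rcv_nxt);        /* 300: [400,500) stays queued */
        return 0;
}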
4444 
4445 static bool tcp_prune_ofo_queue(struct sock *sk);
4446 static int tcp_prune_queue(struct sock *sk);
4447 
4448 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
4449                                  unsigned int size)
4450 {
4451         if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
4452             !sk_rmem_schedule(sk, skb, size)) {
4453 
4454                 if (tcp_prune_queue(sk) < 0)
4455                         return -1;
4456 
4457                 while (!sk_rmem_schedule(sk, skb, size)) {
4458                         if (!tcp_prune_ofo_queue(sk))
4459                                 return -1;
4460                 }
4461         }
4462         return 0;
4463 }
4464 
4465 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4466 {
4467         struct tcp_sock *tp = tcp_sk(sk);
4468         struct rb_node **p, *parent;
4469         struct sk_buff *skb1;
4470         u32 seq, end_seq;
4471         bool fragstolen;
4472 
4473         tcp_ecn_check_ce(sk, skb);
4474 
4475         if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
4476                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
4477                 tcp_drop(sk, skb);
4478                 return;
4479         }
4480 
4481         /* Disable header prediction. */
4482         tp->pred_flags = 0;
4483         inet_csk_schedule_ack(sk);
4484 
4485         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
4486         seq = TCP_SKB_CB(skb)->seq;
4487         end_seq = TCP_SKB_CB(skb)->end_seq;
4488         SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
4489                    tp->rcv_nxt, seq, end_seq);
4490 
4491         p = &tp->out_of_order_queue.rb_node;
4492         if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
4493                 /* Initial out of order segment, build 1 SACK. */
4494                 if (tcp_is_sack(tp)) {
4495                         tp->rx_opt.num_sacks = 1;
4496                         tp->selective_acks[0].start_seq = seq;
4497                         tp->selective_acks[0].end_seq = end_seq;
4498                 }
4499                 rb_link_node(&skb->rbnode, NULL, p);
4500                 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
4501                 tp->ooo_last_skb = skb;
4502                 goto end;
4503         }
4504 
4505         /* In the typical case, we are adding an skb to the end of the list.
4506          * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
4507          */
4508         if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
4509                                  skb, &fragstolen)) {
4510 coalesce_done:
4511                 tcp_grow_window(sk, skb);
4512                 kfree_skb_partial(skb, fragstolen);
4513                 skb = NULL;
4514                 goto add_sack;
4515         }
4516         /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
4517         if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) {
4518                 parent = &tp->ooo_last_skb->rbnode;
4519                 p = &parent->rb_right;
4520                 goto insert;
4521         }
4522 
4523         /* Find place to insert this segment. Handle overlaps on the way. */
4524         parent = NULL;
4525         while (*p) {
4526                 parent = *p;
4527                 skb1 = rb_to_skb(parent);
4528                 if (before(seq, TCP_SKB_CB(skb1)->seq)) {
4529                         p = &parent->rb_left;
4530                         continue;
4531                 }
4532                 if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4533                         if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4534                                 /* All the bits are present. Drop. */
4535                                 NET_INC_STATS(sock_net(sk),
4536                                               LINUX_MIB_TCPOFOMERGE);
4537                                 tcp_drop(sk, skb);
4538                                 skb = NULL;
4539                                 tcp_dsack_set(sk, seq, end_seq);
4540                                 goto add_sack;
4541                         }
4542                         if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4543                                 /* Partial overlap. */
4544                                 tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
4545                         } else {
4546                                 /* skb's seq == skb1's seq and skb covers skb1.
4547                                  * Replace skb1 with skb.
4548                                  */
4549                                 rb_replace_node(&skb1->rbnode, &skb->rbnode,
4550                                                 &tp->out_of_order_queue);
4551                                 tcp_dsack_extend(sk,
4552                                                  TCP_SKB_CB(skb1)->seq,
4553                                                  TCP_SKB_CB(skb1)->end_seq);
4554                                 NET_INC_STATS(sock_net(sk),
4555                                               LINUX_MIB_TCPOFOMERGE);
4556                                 tcp_drop(sk, skb1);
4557                                 goto merge_right;
4558                         }
4559                 } else if (tcp_ooo_try_coalesce(sk, skb1,
4560                                                 skb, &fragstolen)) {
4561                         goto coalesce_done;
4562                 }
4563                 p = &parent->rb_right;
4564         }
4565 insert:
4566         /* Insert segment into RB tree. */
4567         rb_link_node(&skb->rbnode, parent, p);
4568         rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
4569 
4570 merge_right:
4571         /* Remove other segments covered by skb. */
4572         while ((skb1 = skb_rb_next(skb)) != NULL) {
4573                 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
4574                         break;
4575                 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4576                         tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4577                                          end_seq);
4578                         break;
4579                 }
4580                 rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
4581                 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4582                                  TCP_SKB_CB(skb1)->end_seq);
4583                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
4584                 tcp_drop(sk, skb1);
4585         }
4586         /* If there is no skb after us, we are the last_skb ! */
4587         if (!skb1)
4588                 tp->ooo_last_skb = skb;
4589 
4590 add_sack:
4591         if (tcp_is_sack(tp))
4592                 tcp_sack_new_ofo_skb(sk, seq, end_seq);
4593 end:
4594         if (skb) {
4595                 tcp_grow_window(sk, skb);
4596                 skb_condense(skb);
4597                 skb_set_owner_r(skb, sk);
4598         }
4599 }
4600 
4601 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
4602                   bool *fragstolen)
4603 {
4604         int eaten;
4605         struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
4606 
4607         __skb_pull(skb, hdrlen);
4608         eaten = (tail &&
4609                  tcp_try_coalesce(sk, tail,
4610                                   skb, fragstolen)) ? 1 : 0;
4611         tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
4612         if (!eaten) {
4613                 __skb_queue_tail(&sk->sk_receive_queue, skb);
4614                 skb_set_owner_r(skb, sk);
4615         }
4616         return eaten;
4617 }
4618 
4619 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
4620 {
4621         struct sk_buff *skb;
4622         int err = -ENOMEM;
4623         int data_len = 0;
4624         bool fragstolen;
4625 
4626         if (size == 0)
4627                 return 0;
4628 
4629         if (size > PAGE_SIZE) {
4630                 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
4631 
4632                 data_len = npages << PAGE_SHIFT;
4633                 size = data_len + (size & ~PAGE_MASK);
4634         }
4635         skb = alloc_skb_with_frags(size - data_len, data_len,
4636                                    PAGE_ALLOC_COSTLY_ORDER,
4637                                    &err, sk->sk_allocation);
4638         if (!skb)
4639                 goto err;
4640 
4641         skb_put(skb, size - data_len);
4642         skb->data_len = data_len;
4643         skb->len = size;
4644 
4645         if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
4646                 goto err_free;
4647 
4648         err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
4649         if (err)
4650                 goto err_free;
4651 
4652         TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
4653         TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
4654         TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
4655 
4656         if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) {
4657                 WARN_ON_ONCE(fragstolen); /* should not happen */
4658                 __kfree_skb(skb);
4659         }
4660         return size;
4661 
4662 err_free:
4663         kfree_skb(skb);
4664 err:
4665         return err;
4666 
4667 }
4668 
4669 void tcp_data_ready(struct sock *sk)
4670 {
4671         const struct tcp_sock *tp = tcp_sk(sk);
4672         int avail = tp->rcv_nxt - tp->copied_seq;
4673 
4674         if (avail < sk->sk_rcvlowat && !sock_flag(sk, SOCK_DONE))
4675                 return;
4676 
4677         sk->sk_data_ready(sk);
4678 }
4679 
4680 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4681 {
4682         struct tcp_sock *tp = tcp_sk(sk);
4683         bool fragstolen;
4684         int eaten;
4685 
4686         if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
4687                 __kfree_skb(skb);
4688                 return;
4689         }
4690         skb_dst_drop(skb);
4691         __skb_pull(skb, tcp_hdr(skb)->doff * 4);
4692 
4693         tcp_ecn_accept_cwr(tp, skb);
4694 
4695         tp->rx_opt.dsack = 0;
4696 
4697         /*  Queue data for delivery to the user.
4698          *  Packets in sequence go to the receive queue.
4699          *  Out of sequence packets to the out_of_order_queue.
4700          */
4701         if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
4702                 if (tcp_receive_window(tp) == 0)
4703                         goto out_of_window;
4704 
4705                 /* Ok. In sequence. In window. */
4706 queue_and_out:
4707                 if (skb_queue_len(&sk->sk_receive_queue) == 0)
4708                         sk_forced_mem_schedule(sk, skb->truesize);
4709                 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
4710                         goto drop;
4711 
4712                 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
4713                 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
4714                 if (skb->len)
4715                         tcp_event_data_recv(sk, skb);
4716                 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
4717                         tcp_fin(sk);
4718 
4719                 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
4720                         tcp_ofo_queue(sk);
4721 
4722                         /* RFC 2581 section 4.2: SHOULD send an immediate ACK
4723                          * when a gap in the queue is filled.
4724                          */
4725                         if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
4726                                 inet_csk(sk)->icsk_ack.pingpong = 0;
4727                 }
4728 
4729                 if (tp->rx_opt.num_sacks)
4730                         tcp_sack_remove(tp);
4731 
4732                 tcp_fast_path_check(sk);
4733 
4734                 if (eaten > 0)
4735                         kfree_skb_partial(skb, fragstolen);
4736                 if (!sock_flag(sk, SOCK_DEAD))
4737                         tcp_data_ready(sk);
4738                 return;
4739         }
4740 
4741         if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
4742                 /* A retransmit, 2nd most common case.  Force an immediate ack. */
4743                 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4744                 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4745 
4746 out_of_window:
4747                 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
4748                 inet_csk_schedule_ack(sk);
4749 drop:
4750                 tcp_drop(sk, skb);
4751                 return;
4752         }
4753 
4754         /* Out of window, e.g. a zero window probe. */
4755         if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
4756                 goto out_of_window;
4757 
4758         if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4759                 /* Partial packet, seq < rcv_next < end_seq */
4760                 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
4761                            tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
4762                            TCP_SKB_CB(skb)->end_seq);
4763 
4764                 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
4765 
4766                 /* If window is closed, drop tail of packet. But after
4767                  * remembering D-SACK for its head made in previous line.
4768                  */
4769                 if (!tcp_receive_window(tp))
4770                         goto out_of_window;
4771                 goto queue_and_out;
4772         }
4773 
4774         tcp_data_queue_ofo(sk, skb);
4775 }
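A stand-alone sketch of the classification tcp_data_queue() performs, using
local helpers (classify, before, after) rather than the kernel ones:
in-sequence data goes to the receive queue, data entirely below rcv_nxt is an
old duplicate answered with a D-SACK, data starting beyond the window is
dropped, a segment whose head was already received is queued after the D-SACK,
and everything else takes the out-of-order path.

#include <stdint.h>
#include <stdio.h>

enum verdict { IN_SEQUENCE, OLD_DUPLICATE, OUT_OF_WINDOW,
               PARTIAL_OVERLAP, OUT_OF_ORDER };

static int before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static int after(uint32_t a, uint32_t b)  { return before(b, a); }

static enum verdict classify(uint32_t seq, uint32_t end_seq,
                             uint32_t rcv_nxt, uint32_t rcv_wnd)
{
        if (seq == rcv_nxt)
                return rcv_wnd ? IN_SEQUENCE : OUT_OF_WINDOW;
        if (!after(end_seq, rcv_nxt))
                return OLD_DUPLICATE;           /* fully below rcv_nxt */
        if (!before(seq, rcv_nxt + rcv_wnd))
                return OUT_OF_WINDOW;           /* e.g. zero window probe */
        if (before(seq, rcv_nxt))
                return PARTIAL_OVERLAP;         /* head old, tail new */
        return OUT_OF_ORDER;                    /* a hole precedes it */
}

int main(void)
{
        printf("%d\n", classify(1000, 1100, 1000, 65535));      /* 0: in sequence   */
        printf("%d\n", classify(900, 1000, 1000, 65535));       /* 1: old duplicate */
        printf("%d\n", classify(2000, 2100, 1000, 65535));      /* 4: out of order  */
        return 0;
}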
4776 
4777 static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
4778 {
4779         if (list)
4780                 return !skb_queue_is_last(list, skb) ? skb->next : NULL;
4781 
4782         return skb_rb_next(skb);
4783 }
4784 
4785 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4786                                         struct sk_buff_head *list,
4787                                         struct rb_root *root)
4788 {
4789         struct sk_buff *next = tcp_skb_next(skb, list);
4790 
4791         if (list)
4792                 __skb_unlink(skb, list);
4793         else
4794                 rb_erase(&skb->rbnode, root);
4795 
4796         __kfree_skb(skb);
4797         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4798 
4799         return next;
4800 }
4801 
4802 /* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */
4803 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
4804 {
4805         struct rb_node **p = &root->rb_node;
4806         struct rb_node *parent = NULL;
4807         struct sk_buff *skb1;
4808 
4809         while (*p) {
4810                 parent = *p;
4811                 skb1 = rb_to_skb(parent);
4812                 if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
4813                         p = &parent->rb_left;
4814                 else
4815                         p = &parent->rb_right;
4816         }
4817         rb_link_node(&skb->rbnode, parent, p);
4818         rb_insert_color(&skb->rbnode, root);
4819 }
4820 
4821 /* Collapse contiguous sequence of skbs head..tail with
4822  * sequence numbers start..end.
4823  *
4824  * If tail is NULL, this means until the end of the queue.
4825  *
4826  * Segments with FIN/SYN are not collapsed (only because this
4827  * simplifies code)
4828  */
4829 static void
4830 tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
4831              struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
4832 {
4833         struct sk_buff *skb = head, *n;
4834         struct sk_buff_head tmp;
4835         bool end_of_skbs;
4836 
4837         /* First, check that queue is collapsible and find
4838          * the point where collapsing can be useful.
4839          */
4840 restart:
4841         for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
4842                 n = tcp_skb_next(skb, list);
4843 
4844                 /* No new bits? It is possible on ofo queue. */
4845                 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4846                         skb = tcp_collapse_one(sk, skb, list, root);
4847                         if (!skb)
4848                                 break;
4849                         goto restart;
4850                 }
4851 
4852                 /* The first skb to collapse is:
4853                  * - not SYN/FIN and
4854                  * - bloated or contains data before "start" or
4855                  *   overlaps the next one.
4856                  */
4857                 if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) &&
4858                     (tcp_win_from_space(sk, skb->truesize) > skb->len ||
4859                      before(TCP_SKB_CB(skb)->seq, start))) {
4860                         end_of_skbs = false;
4861                         break;
4862                 }
4863 
4864                 if (n && n != tail &&
4865                     TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
4866                         end_of_skbs = false;
4867                         break;
4868                 }
4869 
4870                 /* Decided to skip this, advance start seq. */
4871                 start = TCP_SKB_CB(skb)->end_seq;
4872         }
4873         if (end_of_skbs ||
4874             (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
4875                 return;
4876 
4877         __skb_queue_head_init(&tmp);
4878 
4879         while (before(start, end)) {
4880                 int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
4881                 struct sk_buff *nskb;
4882 
4883                 nskb = alloc_skb(copy, GFP_ATOMIC);
4884                 if (!nskb)
4885                         break;
4886 
4887                 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
4888                 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
4889                 if (list)
4890                         __skb_queue_before(list, skb, nskb);
4891                 else
4892                         __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
4893                 skb_set_owner_r(nskb, sk);
4894 
4895                 /* Copy data, releasing collapsed skbs. */
4896                 while (copy > 0) {
4897                         int offset = start - TCP_SKB_CB(skb)->seq;
4898                         int size = TCP_SKB_CB(skb)->end_seq - start;
4899 
4900                         BUG_ON(offset < 0);
4901                         if (size > 0) {
4902                                 size = min(copy, size);
4903                                 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
4904                                         BUG();
4905                                 TCP_SKB_CB(nskb)->end_seq += size;
4906                                 copy -= size;
4907                                 start += size;
4908                         }
4909                         if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4910                                 skb = tcp_collapse_one(sk, skb, list, root);
4911                                 if (!skb ||
4912                                     skb == tail ||
4913                                     (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
4914                                         goto end;
4915                         }
4916                 }
4917         }
4918 end:
4919         skb_queue_walk_safe(&tmp, skb, n)
4920                 tcp_rbtree_insert(root, skb);
4921 }
4922 
4923 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
4924  * and tcp_collapse() them until all the queue is collapsed.
4925  */
4926 static void tcp_collapse_ofo_queue(struct sock *sk)
4927 {
4928         struct tcp_sock *tp = tcp_sk(sk);
4929         u32 range_truesize, sum_tiny = 0;
4930         struct sk_buff *skb, *head;
4931         u32 start, end;
4932 
4933         skb = skb_rb_first(&tp->out_of_order_queue);
4934 new_range:
4935         if (!skb) {
4936                 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
4937                 return;
4938         }
4939         start = TCP_SKB_CB(skb)->seq;
4940         end = TCP_SKB_CB(skb)->end_seq;
4941         range_truesize = skb->truesize;
4942 
4943         for (head = skb;;) {
4944                 skb = skb_rb_next(skb);
4945 
4946                 /* Range is terminated when we see a gap or when
4947                  * we are at the queue end.
4948                  */
4949                 if (!skb ||
4950                     after(TCP_SKB_CB(skb)->seq, end) ||
4951                     before(TCP_SKB_CB(skb)->end_seq, start)) {
4952                         /* Do not attempt collapsing tiny skbs */
4953                         if (range_truesize != head->truesize ||
4954                             end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
4955                                 tcp_collapse(sk, NULL, &tp->out_of_order_queue,
4956                                              head, skb, start, end);
4957                         } else {
4958                                 sum_tiny += range_truesize;
4959                                 if (sum_tiny > sk->sk_rcvbuf >> 3)
4960                                         return;
4961                         }
4962                         goto new_range;
4963                 }
4964 
4965                 range_truesize += skb->truesize;
4966                 if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
4967                         start = TCP_SKB_CB(skb)->seq;
4968                 if (after(TCP_SKB_CB(skb)->end_seq, end))
4969                         end = TCP_SKB_CB(skb)->end_seq;
4970         }
4971 }
4972 
4973 /*
4974  * Clean the out-of-order queue to make room.
4975  * We drop packets with high sequence numbers in order to:
4976  * 1) give holes a chance to be filled;
4977  * 2) avoid adding large latencies if thousands of packets sit there
4978  *    (but if the application shrinks SO_RCVBUF, we could still end up
4979  *     freeing the whole queue here);
4980  * 3) drop at least 12.5 % of sk_rcvbuf to mitigate malicious attacks.
4981  *
4982  * Return true if queue has shrunk.
4983  */
4984 static bool tcp_prune_ofo_queue(struct sock *sk)
4985 {
4986         struct tcp_sock *tp = tcp_sk(sk);
4987         struct rb_node *node, *prev;
4988         int goal;
4989 
4990         if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
4991                 return false;
4992 
4993         NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
4994         goal = sk->sk_rcvbuf >> 3;
4995         node = &tp->ooo_last_skb->rbnode;
4996         do {
4997                 prev = rb_prev(node);
4998                 rb_erase(node, &tp->out_of_order_queue);
4999                 goal -= rb_to_skb(node)->truesize;
5000                 tcp_drop(sk, rb_to_skb(node));
5001                 if (!prev || goal <= 0) {
5002                         sk_mem_reclaim(sk);
5003                         if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
5004                             !tcp_under_memory_pressure(sk))
5005                                 break;
5006                         goal = sk->sk_rcvbuf >> 3;
5007                 }
5008                 node = prev;
5009         } while (node);
5010         tp->ooo_last_skb = rb_to_skb(prev);
5011 
5012         /* Reset SACK state.  A conforming SACK implementation will
5013          * do the same at a timeout based retransmit.  When a connection
5014          * is in a sad state like this, we care only about integrity
5015          * of the connection not performance.
5016          */
5017         if (tp->rx_opt.sack_ok)
5018                 tcp_sack_reset(&tp->rx_opt);
5019         return true;
5020 }
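A toy sketch of the pruning policy above (the array contents and buffer sizes
are made up): skbs are dropped starting from the highest sequence numbers, and
pruning stops once roughly one eighth of the receive buffer has been reclaimed.

#include <stdio.h>

int main(void)
{
        int truesize[] = { 4096, 4096, 2048, 8192, 4096 };      /* ooo skbs, low..high seq */
        int n = 5, rcvbuf = 65536;
        int goal = rcvbuf >> 3;                                 /* 12.5 % of sk_rcvbuf */
        int dropped = 0;

        while (n > 0 && goal > 0) {                             /* highest-seq first */
                goal -= truesize[--n];
                dropped++;
        }
        printf("dropped %d skbs\n", dropped);   /* 2: 4096 + 8192 >= 8192 */
        return 0;
}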
5021 
5022 /* Reduce allocated memory if we can, trying to get
5023  * the socket within its memory limits again.
5024  *
5025  * Return less than zero if we should start dropping frames
5026  * until the socket owning process reads some of the data
5027  * to stabilize the situation.
5028  */
5029 static int tcp_prune_queue(struct sock *sk)
5030 {
5031         struct tcp_sock *tp = tcp_sk(sk);
5032 
5033         SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
5034 
5035         NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
5036 
5037         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
5038                 tcp_clamp_window(sk);
5039         else if (tcp_under_memory_pressure(sk))
5040                 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
5041 
5042         if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5043                 return 0;
5044 
5045         tcp_collapse_ofo_queue(sk);
5046         if (!skb_queue_empty(&sk->sk_receive_queue))
5047                 tcp_collapse(sk, &sk->sk_receive_queue, NULL,
5048                              skb_peek(&sk->sk_receive_queue),
5049                              NULL,
5050                              tp->copied_seq, tp->rcv_nxt);
5051         sk_mem_reclaim(sk);
5052 
5053         if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5054                 return 0;
5055 
5056         /* Collapsing did not help; destructive actions follow.
5057          * This must never occur. */
5058 
5059         tcp_prune_ofo_queue(sk);
5060 
5061         if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5062                 return 0;
5063 
5064         /* If we are really being abused, tell the caller to silently
5065          * drop receive data on the floor.  It will get retransmitted
5066          * and hopefully then we'll have sufficient space.
5067          */
5068         NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
5069 
5070         /* Massive buffer overcommit. */
5071         tp->pred_flags = 0;
5072         return -1;
5073 }
5074 
5075 static bool tcp_should_expand_sndbuf(const struct sock *sk)
5076 {
5077         const struct tcp_sock *tp = tcp_sk(sk);
5078 
5079         /* If the user specified a specific send buffer setting, do
5080          * not modify it.
5081          */
5082         if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
5083                 return false;
5084 
5085         /* If we are under global TCP memory pressure, do not expand.  */
5086         if (tcp_under_memory_pressure(sk))
5087                 return false;
5088 
5089         /* If we are under soft global TCP memory pressure, do not expand.  */
5090         if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
5091                 return false;
5092 
5093         /* If we filled the congestion window, do not expand.  */
5094         if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
5095                 return false;
5096 
5097         return true;
5098 }
5099 
5100 /* When an incoming ACK allows us to free some skb from the write_queue,
5101  * we remember this event in the SOCK_QUEUE_SHRUNK flag and wake up the socket
5102  * on exit from the TCP input handler.
5103  *
5104  * PROBLEM: sndbuf expansion does not work well with largesend.
5105  */
5106 static void tcp_new_space(struct sock *sk)
5107 {
5108         struct tcp_sock *tp = tcp_sk(sk);
5109 
5110         if (tcp_should_expand_sndbuf(sk)) {
5111                 tcp_sndbuf_expand(sk);
5112                 tp->snd_cwnd_stamp = tcp_jiffies32;
5113         }
5114 
5115         sk->sk_write_space(sk);
5116 }
5117 
5118 static void tcp_check_space(struct sock *sk)
5119 {
5120         if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
5121                 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
5122                 /* pairs with tcp_poll() */
5123                 smp_mb();
5124                 if (sk->sk_socket &&
5125                     test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
5126                         tcp_new_space(sk);
5127                         if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
5128                                 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
5129                 }
5130         }
5131 }
5132 
5133 static inline void tcp_data_snd_check(struct sock *sk)
5134 {
5135         tcp_push_pending_frames(sk);
5136         tcp_check_space(sk);
5137 }
5138 
5139 /*
5140  * Check if sending an ack is needed.
5141  */
5142 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
5143 {
5144         struct tcp_sock *tp = tcp_sk(sk);
5145         unsigned long rtt, delay;
5146 
5147             /* More than one full frame received... */
5148         if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
5149              /* ... and right edge of window advances far enough.
5150               * (tcp_recvmsg() will send ACK otherwise).
5151               * If application uses SO_RCVLOWAT, we want send ack now if
5152               * we have not received enough bytes to satisfy the condition.
5153               */
5154             (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
5155              __tcp_select_window(sk) >= tp->rcv_wnd)) ||
5156             /* We ACK each frame or... */
5157             tcp_in_quickack_mode(sk)) {
5158 send_now:
5159                 tcp_send_ack(sk);
5160                 return;
5161         }
5162 
5163         if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5164                 tcp_send_delayed_ack(sk);
5165                 return;
5166         }
5167 
5168         if (!tcp_is_sack(tp) ||
5169             tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
5170                 goto send_now;
5171         tp->compressed_ack++;
5172 
5173         if (hrtimer_is_queued(&tp->compressed_ack_timer))
5174                 return;
5175 
5176         /* compressed ack timer: 5% of RTT, but no more than tcp_comp_sack_delay_ns */
5177 
5178         rtt = tp->rcv_rtt_est.rtt_us;
5179         if (tp->srtt_us && tp->srtt_us < rtt)
5180                 rtt = tp->srtt_us;
5181 
5182         delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
5183                       rtt * (NSEC_PER_USEC >> 3)/20);
5184         sock_hold(sk);
5185         hrtimer_start(&tp->compressed_ack_timer, ns_to_ktime(delay),
5186                       HRTIMER_MODE_REL_PINNED_SOFT);
5187 }
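A small worked example of the delay computation above (the numeric values are
illustrative, not defaults taken from the source): srtt is kept left-shifted
by 3, i.e. in units of 1/8 microsecond, so multiplying by NSEC_PER_USEC >> 3
and dividing by 20 yields 5% of the RTT in nanoseconds, which is then capped
by the tcp_comp_sack_delay_ns sysctl.

#include <stdio.h>

#define NSEC_PER_USEC   1000UL

int main(void)
{
        unsigned long comp_sack_delay_ns = 1000000UL;   /* 1 ms cap (example) */
        unsigned long srtt_shifted = 10000UL << 3;      /* RTT = 10 ms        */
        unsigned long delay;

        delay = srtt_shifted * (NSEC_PER_USEC >> 3) / 20;       /* 5% of RTT */
        if (delay > comp_sack_delay_ns)
                delay = comp_sack_delay_ns;
        printf("delay = %lu ns\n", delay);      /* 500000 ns = 0.5 ms */
        return 0;
}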
5188 
5189 static inline void tcp_ack_snd_check(struct sock *sk)
5190 {
5191         if (!inet_csk_ack_scheduled(sk)) {
5192                 /* We sent a data segment already. */
5193                 return;
5194         }
5195         __tcp_ack_snd_check(sk, 1);
5196 }
5197 
5198 /*
5199  *      This routine is only called when we have urgent data
5200  *      signaled. It's the 'slow' part of tcp_urg. It could be
5201  *      moved inline now as tcp_urg is only called from one
5202  *      place. We handle URGent data wrong; we have to, as
5203  *      BSD still doesn't use the correction from RFC961.
5204  *      For 1003.1g we should support a new option TCP_STDURG to permit
5205  *      either form (or just set the sysctl tcp_stdurg).
5206  */
5207 
5208 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
5209 {
5210         struct tcp_sock *tp = tcp_sk(sk);
5211         u32 ptr = ntohs(th->urg_ptr);
5212 
5213         if (ptr && !sock_net(sk)->ipv4.sysctl_tcp_stdurg)
5214                 ptr--;
5215         ptr += ntohl(th->seq);
5216 
5217         /* Ignore urgent data that we've already seen and read. */
5218         if (after(tp->copied_seq, ptr))
5219                 return;
5220 
5221         /* Do not replay urg ptr.
5222          *
5223          * NOTE: interesting situation not covered by specs.
5224          * A misbehaving sender may send an urg ptr pointing into a segment
5225          * that we already hold in the ofo queue. We cannot fetch such data
5226          * and will stay in TCP_URG_NOTYET until it is eventually consumed
5227          * by recvmsg(). It seems we are not obliged to handle such wicked
5228          * situations, but it is worth thinking about the possibility of
5229          * DoSes built on some hypothetical application-level deadlock.
5230          */
5231         if (before(ptr, tp->rcv_nxt))
5232                 return;
5233 
5234         /* Do we already have a newer (or duplicate) urgent pointer? */
5235         if (tp->urg_data && !after(ptr, tp->urg_seq))
5236                 return;
5237 
5238         /* Tell the world about our new urgent pointer. */
5239         sk_send_sigurg(sk);
5240 
5241         /* We may be adding urgent data when the last byte read was
5242          * urgent. To do this requires some care. We cannot just ignore
5243          * tp->copied_seq since we would read the last urgent byte again
5244          * as data, nor can we alter copied_seq until this data arrives
5245          * or we break the semantics of SIOCATMARK (and thus sockatmark()).
5246          *
5247          * NOTE. The paragraph above is a bit of Double Dutch; in plain
5248          * English: its author did something like send("A", MSG_OOB);
5249          * send("B", MSG_OOB); and expected both A and B to disappear from
5250          * the stream. That is _wrong_: although BSD usually behaves this
5251          * way, it is not guaranteed, and any application relying on it is
5252          * buggy. Note also that the fix "works" only in this artificial
5253          * test; insert normal data between A and B and we diverge from BSD
5254          * again. Verdict: better to remove this and let buggy users trip.
5255          */
5256         if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
5257             !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
5258                 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
5259                 tp->copied_seq++;
5260                 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
5261                         __skb_unlink(skb, &sk->sk_receive_queue);
5262                         __kfree_skb(skb);
5263                 }
5264         }
5265 
5266         tp->urg_data = TCP_URG_NOTYET;
5267         tp->urg_seq = ptr;
5268 
5269         /* Disable header prediction. */
5270         tp->pred_flags = 0;
5271 }
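
/*
 * Illustrative sketch (editor's addition, not part of tcp_input.c): the
 * urgent-pointer arithmetic performed by tcp_check_urg() above.  With
 * tcp_stdurg == 0 (the default, BSD-compatible interpretation) the pointer
 * names the byte *after* the urgent byte, so it is decremented before being
 * added to the segment's sequence number.  Standalone userspace C; the
 * function and parameter names are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_urg_seq(uint32_t seg_seq, uint16_t urg_ptr, int stdurg)
{
        uint32_t ptr = urg_ptr;

        if (ptr && !stdurg)
                ptr--;          /* BSD points one past the urgent byte */

        return seg_seq + ptr;   /* absolute sequence number of the urgent byte */
}

int main(void)
{
        printf("%u %u\n", sketch_urg_seq(1000, 4, 0),   /* 1003: BSD style */
                          sketch_urg_seq(1000, 4, 1));  /* 1004: RFC style */
        return 0;
}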
5272 
5273 /* This is the 'fast' part of urgent handling. */
5274 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
5275 {
5276         struct tcp_sock *tp = tcp_sk(sk);
5277 
5278         /* Check if we get a new urgent pointer - normally not. */
5279         if (th->urg)
5280                 tcp_check_urg(sk, th);
5281 
5282         /* Do we wait for any urgent data? - normally not... */
5283         if (tp->urg_data == TCP_URG_NOTYET) {
5284                 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
5285                           th->syn;
5286 
5287                 /* Is the urgent pointer pointing into this packet? */
5288                 if (ptr < skb->len) {
5289                         u8 tmp;
5290                         if (skb_copy_bits(skb, ptr, &tmp, 1))
5291                                 BUG();
5292                         tp->urg_data = TCP_URG_VALID | tmp;
5293                         if (!sock_flag(sk, SOCK_DEAD))
5294                                 sk->sk_data_ready(sk);
5295                 }
5296         }
5297 }
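
/*
 * Illustrative sketch (editor's addition, not part of tcp_input.c): the
 * offset arithmetic tcp_urg() uses to locate the urgent byte in a segment
 * whose TCP header has not been pulled yet.  The byte with sequence number
 * urg_seq sits (urg_seq - seg_seq) bytes into the payload, the payload
 * starts doff * 4 bytes into the buffer, and a SYN consumes one sequence
 * number without occupying a data byte.  Standalone userspace C; names are
 * made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_urg_offset(uint32_t urg_seq, uint32_t seg_seq,
                                  unsigned int doff, int syn)
{
        return urg_seq - seg_seq + doff * 4 - syn;
}

int main(void)
{
        /* urgent byte 3 bytes into the payload of a 20-byte-header segment */
        printf("offset %u (valid only if < skb length)\n",
               sketch_urg_offset(1003, 1000, 5, 0));    /* -> 23 */
        return 0;
}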
5298 
5299 /* Accept RST for rcv_nxt - 1 after a FIN.
5300  * When TCP connections are abruptly terminated from Mac OSX (via ^C), a
5301  * FIN is sent followed by a RST packet. The RST is sent with the same
5302  * sequence number as the FIN, and thus according to RFC 5961 a challenge
5303  * ACK should be sent. However, Mac OSX rate limits replies to challenge
5304  * ACKs on the closed socket. In addition middleboxes can drop either the
5305  * challenge ACK or a subsequent RST.
5306  */
5307 static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
5308 {
5309         struct tcp_sock *tp = tcp_sk(sk);
5310 
5311         return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
5312                         (1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
5313                                                TCPF_CLOSING));
5314 }
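
/*
 * Illustrative sketch (editor's addition, not part of tcp_input.c): the
 * "(1 << state) & mask" idiom tcp_reset_check() uses above to test one
 * state against several candidates with a single AND.  It relies on the
 * kernel convention that each TCPF_FOO flag equals (1 << TCP_FOO).  The
 * enum values below are made up for illustration, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

enum { SK_CLOSE_WAIT = 8, SK_LAST_ACK = 9, SK_CLOSING = 11 };   /* assumed */

#define SKF(state)      (1 << (state))

static bool sketch_state_is_fin_side(int state)
{
        return SKF(state) & (SKF(SK_CLOSE_WAIT) | SKF(SK_LAST_ACK) |
                             SKF(SK_CLOSING));
}

int main(void)
{
        printf("%d %d\n", sketch_state_is_fin_side(SK_LAST_ACK),     /* 1 */
                          sketch_state_is_fin_side(SK_CLOSING + 2)); /* 0 */
        return 0;
}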
5315 
5316 /* Does PAWS and seqno-based validation of an incoming segment; flags
5317  * play a significant role here.
5318  */
5319 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5320                                   const struct tcphdr *th, int syn_inerr)
5321 {
5322         struct tcp_sock *tp = tcp_sk(sk);
5323         bool rst_seq_match = false;
5324 
5325         /* RFC1323: H1. Apply PAWS check first. */
5326         if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) &&
5327             tp->rx_opt.saw_tstamp &&
5328             tcp_paws_discard(sk, skb)) {
5329                 if (!th->rst) {
5330                         NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
5331                         if (!tcp_oow_rate_limited(sock_net(sk), skb,
5332                                                   LINUX_MIB_TCPACKSKIPPEDPAWS,
5333                                                   &tp->last_oow_ack_time))
5334                                 tcp_send_dupack(sk, skb);
5335                         goto discard;
5336                 }
5337                 /* Reset is accepted even if it did not pass PAWS. */
5338         }
5339 
5340         /* Step 1: check sequence number */
5341         if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
5342                 /* RFC793, page 37: "In all states except SYN-SENT, all reset
5343                  * (RST) segments are validated by checking their SEQ-fields."
5344                  * And page 69: "If an incoming segment is not acceptable,
5345                  * an acknowledgment should be sent in reply (unless the RST
5346                  * bit is set, if so drop the segment and return)".
5347                  */
5348                 if (!th->rst) {
5349                         if (th->syn)
5350                                 goto syn_challenge;
5351                         if (!tcp_oow_rate_limited(sock_net(sk), skb,
5352                                                   LINUX_MIB_TCPACKSKIPPEDSEQ,
5353                                                   &tp->last_oow_ack_time))
5354                                 tcp_send_dupack(sk, skb);
5355                 } else if (tcp_reset_check(sk, skb)) {
5356                         tcp_reset(sk);
5357                 }
5358                 goto discard;
5359         }
5360 
5361         /* Step 2: check RST bit */
5362         if (th->rst) {
5363                 /* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a
5364                  * FIN and SACK too if available):
5365                  * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or
5366                  * the right-most SACK block,
5367                  * then
5368                  *     RESET the connection
5369                  * else
5370                  *     Send a challenge ACK
5371                  */
5372                 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
5373                     tcp_reset_check(sk, skb)) {
5374                         rst_seq_match = true;
5375                 } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
5376                         struct tcp_sack_block *sp = &tp->selective_acks[0];
5377                         int max_sack = sp[0].end_seq;
5378                         int this_sack;
5379 
5380                         for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;
5381                              ++this_sack) {
5382                                 max_sack = after(sp[this_sack].end_seq,
5383                                                  max_sack) ?
5384                                         sp[this_sack].end_seq : max_sack;
5385                         }
5386 
5387                         if (TCP_SKB_CB(skb)->seq == max_sack)
5388                                 rst_seq_match = true;
5389                 }
5390 
5391                 if (rst_seq_match)
5392                         tcp_reset(sk);
5393                 else {
5394                         /* Disable TFO if RST is out-of-order
5395                          * and no data has been received
5396                          * for current active TFO socket
5397                          */
5398                         if (tp->syn_fastopen && !tp->data_segs_in &&
5399                             sk->sk_state == TCP_ESTABLISHED)
5400                                 tcp_fastopen_active_disable(sk);
5401                         tcp_send_challenge_ack(sk, skb);
5402                 }
5403                 goto discard;
5404         }
5405 
5406         /* step 3: check security and precedence [ignored] */
5407 
5408         /* step 4: Check for a SYN
5409          * RFC 5961 4.2 : Send a challenge ack
5410          */
5411         if (th->syn) {
5412 syn_challenge:
5413                 if (syn_inerr)
5414                         TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
5415                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
5416                 tcp_send_challenge_ack(sk, skb);
5417                 goto discard;
5418         }
5419 
5420         return true;
5421 
5422 discard:
5423         tcp_drop(sk, skb);
5424         return false;
5425 }
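
/*
 * Illustrative sketch (editor's addition, not part of tcp_input.c): the
 * RFC 5961 3.2 RST acceptance rule applied in "Step 2" of
 * tcp_validate_incoming() above.  A RST is honoured only if its sequence
 * number equals RCV.NXT (or RCV.NXT - 1 after a FIN, handled separately by
 * tcp_reset_check()) or equals the right-most SACK edge we have advertised;
 * anything else earns a challenge ACK.  Standalone userspace C; names are
 * made up for illustration, and sketch_after() re-creates the kernel's
 * wrap-safe sequence comparison.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool sketch_after(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) > 0;    /* "a is after b", modulo 2^32 */
}

static bool sketch_rst_seq_match(uint32_t rst_seq, uint32_t rcv_nxt,
                                 const uint32_t *sack_end, int num_sacks)
{
        uint32_t max_sack;
        int i;

        if (rst_seq == rcv_nxt)
                return true;

        if (num_sacks <= 0)
                return false;

        max_sack = sack_end[0];
        for (i = 1; i < num_sacks; i++)
                if (sketch_after(sack_end[i], max_sack))
                        max_sack = sack_end[i];

        return rst_seq == max_sack;     /* right-most SACK edge */
}

int main(void)
{
        uint32_t sack_end[] = { 2000, 2500 };

        printf("%d %d\n", sketch_rst_seq_match(1500, 1500, sack_end, 2),  /* 1 */
                          sketch_rst_seq_match(2500, 1500, sack_end, 2)); /* 1 */
        return 0;
}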
5426 
5427 /*
5428  *      TCP receive function for the ESTABLISHED state.
5429  *
5430  *      It is split into a fast path and a slow path. The fast path is
5431  *      disabled when:
5432  *      - A zero window was announced from us - zero window probing
5433  *        is only handled properly in the slow path.
5434  *      - Out of order segments arrived.
5435  *      - Urgent data is expected.
5436  *      - There is no buffer space left
5437  *      - Unexpected TCP flags/window values/header lengths are received
5438  *        (detected by checking the TCP header against pred_flags)
5439  *      - Data is sent in both directions. Fast path only supports pure senders
5440  *        or pure receivers (this means either the sequence number or the ack
5441  *        value must stay constant)
5442  *      - Unexpected TCP option.
5443  *
5444  *      When these conditions are not satisfied it drops into a standard
5445  *      receive procedure patterned after RFC793 to handle all cases.
5446  *      The first three cases are guaranteed by proper pred_flags setting,
5447  *      the rest is checked inline. Fast processing is turned on in
5448  *      tcp_data_queue when everything is OK.
5449  */
5450 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
5451 {
5452         const struct tcphdr *th = (const struct tcphdr *)skb->data;
5453         struct tcp_sock *tp = tcp_sk(sk);
5454         unsigned int len = skb->len;
5455 
5456         /* TCP congestion window tracking */
5457         trace_tcp_probe(sk, skb);
5458 
5459         tcp_mstamp_refresh(tp);
5460         if (unlikely(!sk->sk_rx_dst))
5461                 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
5462         /*
5463          *      Header prediction.
5464          *      The code loosely follows the one in the famous
5465          *      "30 instruction TCP receive" Van Jacobson mail.
5466          *
5467          *      Van's trick is to deposit buffers into the socket queue
5468          *      on a device interrupt, and to call the tcp_recv function
5469          *      in the receive process context to checksum and copy
5470          *      the buffer to user space. Smart...
5471          *
5472          *      Our current scheme is not silly either, but we take the
5473          *      extra cost of the net_bh soft interrupt processing...
5474          *      We also do checksum and copy, but from device to kernel.
5475          */
5476 
5477         tp->rx_opt.saw_tstamp = 0;
5478 
5479         /*      pred_flags is 0xS?10 << 16 + snd_wnd
5480          *      if header_prediction is to be made
5481          *      'S' will always be tp->tcp_header_len >> 2
5482          *      '?' will be 0 for the fast path, otherwise pred_flags is 0
5483          *      to turn it off (when there are holes in the receive space,
5484          *      for instance)
5485          *      PSH flag is ignored.
5486          */
5487 
5488         if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
5489             TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
5490             !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
5491                 int tcp_header_len = tp->tcp_header_len;
5492 
5493                 /* Timestamp header prediction: tcp_header_len
5494                  * is automatically equal to th->doff*4 due to pred_flags
5495                  * match.
5496                  */
5497 
5498                 /* Check timestamp */
5499                 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
5500                         /* No? Slow path! */
5501                         if (!tcp_parse_aligned_timestamp(tp, th))
5502                                 goto slow_path;
5503 
5504                         /* If PAWS failed, check it more carefully in slow path */
5505                         if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
5506                                 goto slow_path;
5507 
5508                         /* DO NOT update ts_recent here, if checksum fails
5509                          * and timestamp was corrupted part, it will result
5510                          * in a hung connection since we will drop all
5511                          * future packets due to the PAWS test.
5512                          */
5513                 }
5514 
5515                 if (len <= tcp_header_len) {
5516                         /* Bulk data transfer: sender */
5517                         if (len == tcp_header_len) {
5518                                 /* Predicted packet is in window by definition.
5519                                  * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5520                                  * Hence, check seq<=rcv_wup reduces to:
5521                                  */
5522                                 if (tcp_header_len ==
5523                                     (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
5524                                     tp->rcv_nxt == tp->rcv_wup)
5525                                         tcp_store_ts_recent(tp);
5526 
5527                                 /* We know that such packets are checksummed
5528                                  * on entry.
5529                                  */
5530                                 tcp_ack(sk, skb, 0);
5531                                 __kfree_skb(skb);
5532                                 tcp_data_snd_check(sk);
5533                                 return;
5534                         } else { /* Header too small */
5535                                 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
5536                                 goto discard;
5537                         }
5538                 } else {
5539                         int eaten = 0;
5540                         bool fragstolen = false;
5541 
5542                         if (tcp_checksum_complete(skb))
5543                                 goto csum_error;
5544 
5545                         if ((int)skb->truesize > sk->sk_forward_alloc)
5546                                 goto step5;
5547 
5548                         /* Predicted packet is in window by definition.
5549                          * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5550                          * Hence, check seq<=rcv_wup reduces to:
5551                          */
5552                         if (tcp_header_len ==
5553                             (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
5554                             tp->rcv_nxt == tp->rcv_wup)
5555                                 tcp_store_ts_recent(tp);
5556 
5557                         tcp_rcv_rtt_measure_ts(sk, skb);
5558 
5559                         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
5560 
5561                         /* Bulk data transfer: receiver */
5562                         eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
5563                                               &fragstolen);
5564 
5565                         tcp_event_data_recv(sk, skb);
5566 
5567                         if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
5568                                 /* Well, only one small jumplet in fast path... */
5569                                 tcp_ack(sk, skb, FLAG_DATA);
5570                                 tcp_data_snd_check(sk);
5571                                 if (!inet_csk_ack_scheduled(sk))
5572                                         goto no_ack;
5573                         }
5574 
5575                         __tcp_ack_snd_check(sk, 0);
5576 no_ack:
5577                         if (eaten)
5578                                 kfree_skb_partial(skb, fragstolen);
5579                         tcp_data_ready(sk);
5580                         return;
5581                 }
5582         }
5583 
5584 slow_path:
5585         if (len < (th->doff << 2) || tcp_checksum_complete(skb))
5586                 goto csum_error;
5587 
5588         if (!th->ack && !th->rst && !th->syn)
5589                 goto discard;
5590 
5591         /*
5592          *      Standard slow path.
5593          */
5594 
5595         if (!tcp_validate_incoming(sk, skb, th, 1))
5596                 return;
5597 
5598 step5:
5599         if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
5600                 goto discard;
5601 
5602         tcp_rcv_rtt_measure_ts(sk, skb);
5603 
5604         /* Process urgent data. */
5605         tcp_urg(sk, skb, th);
5606 
5607         /* step 7: process the segment text */
5608         tcp_data_queue(sk, skb);
5609 
5610         tcp_data_snd_check(sk);
5611         tcp_ack_snd_check(sk);
5612         return;
5613 
5614 csum_error:
5615         TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
5616         TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
5617 
5618 discard:
5619         tcp_drop(sk, skb);
5620 }
5621 EXPORT_SYMBOL(tcp_rcv_established);
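
/*
 * Illustrative sketch (editor's addition, not part of tcp_input.c): building
 * the header-prediction word described by the "pred_flags is
 * 0xS?10 << 16 + snd_wnd" comment inside tcp_rcv_established().  The third
 * 32-bit word of a TCP header packs the data offset, the reserved bits, the
 * flag byte and the 16-bit window; with only ACK set and the expected offset
 * and window, that word equals pred_flags exactly, so one compare selects
 * the fast path.  Host byte order is used here for readability (the kernel
 * keeps the real pred_flags in network order); names are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_FLAG_ACK 0x10u   /* ACK bit within the TCP flag byte */

static uint32_t sketch_pred_flags(unsigned int tcp_header_len, uint16_t snd_wnd)
{
        /* "S" nibble = data offset in 32-bit words, i.e. header length / 4 */
        return ((uint32_t)(tcp_header_len >> 2) << 28) |
               (SKETCH_FLAG_ACK << 16) |
               snd_wnd;
}

int main(void)
{
        /* 32-byte header (timestamps) and a 29200-byte window -> 0x80107210 */
        printf("0x%08x\n", (unsigned int)sketch_pred_flags(32, 29200));
        return 0;
}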
5622 
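/*
 * Illustrative sketch (editor's addition, not part of tcp_input.c): the
 * wrap-safe timestamp comparison used by the fast-path PAWS check in
 * tcp_rcv_established() above ("(s32)(rcv_tsval - ts_recent) < 0").
 * Casting the unsigned difference to a signed 32-bit value keeps "the
 * timestamp went backwards" detection correct even when the peer's
 * timestamp clock wraps past 2^32.  Standalone userspace C; names are made
 * up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static int sketch_tsval_is_older(uint32_t rcv_tsval, uint32_t ts_recent)
{
        return (int32_t)(rcv_tsval - ts_recent) < 0;
}

int main(void)
{
        /* plain case: 99 is older than 100 */
        printf("%d\n", sketch_tsval_is_older(99, 100));                 /* 1 */
        /* wrapped case: 5 is newer than 0xfffffffa despite being smaller */
        printf("%d\n", sketch_tsval_is_older(5, 0xfffffffaU));          /* 0 */
        return 0;
}
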
5623 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5624 {
5625         struct tcp_sock *tp = tcp_sk(sk);
5626         struct inet_connection_sock *icsk = inet_csk(sk);
5627 
5628         tcp_set_state(sk, TCP_ESTABLISHED);
5629         icsk->icsk_ack.lrcvtime = tcp_jiffies32;
5630 
5631         if (skb) {
5632                 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
5633                 security_inet_conn_established(sk, skb);
5634         }
5635 
5636         tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB);
5637 
5638         /* Prevent spurious tcp_cwnd_restart() on first data
5639          * packet.
5640          */
5641         tp->lsndtime = tcp_jiffies32;
5642 
5643         if (sock_flag(sk, SOCK_KEEPOPEN))
5644                 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
5645 
5646         if (!tp->rx_opt.snd_wscale)
5647                 __tcp_fast_path_on(tp, tp->snd_wnd);
5648         else
5649                 tp->pred_flags = 0;
5650 }
5651 
5652 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5653                                     struct tcp_fastopen_cookie *cookie)
5654 {
5655         struct tcp_sock *tp = tcp_sk(sk);
5656         struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL;
5657         u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
5658         bool syn_drop = false;
5659 
5660         if (mss == tp->rx_opt.user_mss) {
5661                 struct tcp_options_received opt;
5662 
5663                 /* Get original SYNACK MSS value if user MSS sets mss_clamp */
5664                 tcp_clear_options(&opt);
5665                 opt.user_mss = opt.mss_clamp = 0;
5666                 tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL);
5667                 mss = opt.mss_clamp;
5668         }
5669 
5670         if (!tp->syn_fastopen) {
5671                 /* Ignore an unsolicited cookie */
5672                 cookie->len = -1;
5673         } else if (tp->total_retrans) {
5674                 /* SYN timed out and the SYN-ACK neither has a cookie nor
5675                  * acknowledges data. Presumably the remote received only
5676                  * the retransmitted (regular) SYNs: either the original
5677                  * SYN-data or the corresponding SYN-ACK was dropped.
5678                  */
5679                 syn_drop = (cookie->len < 0 && data);
5680         } else if (cookie->len < 0 && !tp->syn_data) {
5681                 /* We requested a cookie but didn't get it. If we did not use
5682                  * the (old) exp opt format, try it next time (try_exp=1).
5683                  * Otherwise go back to using the RFC7413 opt (try_exp=2).
5684                  */
5685                 try_exp = tp->syn_fastopen_exp ? 2 : 1;
5686         }
5687 
5688         tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
5689 
5690         if (data) { /* Retransmit unacked data in SYN */
5691                 skb_rbtree_walk_from(data) {
5692                         if (__tcp_retransmit_skb(sk, data, 1))
5693                                 break;
5694                 }
5695                 tcp_rearm_rto(sk);
5696                 NET_INC_STATS(sock_net(sk),
5697                                 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
5698                 return true;
5699         }
5700         tp->syn_data_acked = tp->syn_data;
5701         if (tp->syn_data_acked) {
5702                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
5703                 /* SYN-data is counted as two separate packets in tcp_ack() */
5704                 if (tp->delivered > 1)
5705                         --tp->delivered;
5706         }
5707 
5708         tcp_fastopen_add_skb(sk, synack);
5709 
5710         return false;
5711 }
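
/*
 * Illustrative sketch (editor's addition, not part of tcp_input.c): the
 * client-side Fast Open bookkeeping decisions made by
 * tcp_rcv_fastopen_synack() above, reduced to one decision function.
 * got_cookie stands for "the SYN-ACK carried a cookie" (cookie->len >= 0),
 * retransmitted for tp->total_retrans, sent_data for tp->syn_data and
 * used_exp_opt for tp->syn_fastopen_exp.  Standalone userspace C; all names
 * are made up for illustration.
 */
#include <stdio.h>

struct sketch_tfo_outcome {
        int syn_drop;   /* remember that the SYN-data was probably dropped */
        int try_exp;    /* 0: keep format, 1: try exp option, 2: back to RFC7413 */
};

static struct sketch_tfo_outcome
sketch_tfo_classify(int requested_fastopen, int got_cookie, int retransmitted,
                    int sent_data, int used_exp_opt)
{
        struct sketch_tfo_outcome out = { 0, 0 };

        if (!requested_fastopen)
                return out;             /* unsolicited cookie is just ignored */
        if (retransmitted)
                out.syn_drop = !got_cookie && sent_data;
        else if (!got_cookie && !sent_data)
                out.try_exp = used_exp_opt ? 2 : 1;
        return out;
}

int main(void)
{
        /* SYN-data timed out and the SYN-ACK carried no cookie */
        struct sketch_tfo_outcome out = sketch_tfo_classify(1, 0, 1, 1, 0);

        printf("syn_drop=%d try_exp=%d\n", out.syn_drop, out.try_exp);  /* 1 0 */
        return 0;
}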
5712 
5713 static void smc_check_reset_syn(struct tcp_sock *tp)
5714 {
5715 #if IS_ENABLED(CONFIG_SMC)
5716         if (static_branch_unlikely(&tcp_have_smc)) {
5717                 if (tp->syn_smc && !tp->rx_opt.smc_ok)
5718                         tp->syn_smc = 0;
5719         }
5720 #endif
5721 }
5722 
5723 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5724                                          const struct tcphdr *th)
5725 {
5726         struct inet_connection_sock *icsk = inet_csk(sk);
5727         struct tcp_sock *tp = tcp_sk(sk);
5728         struct tcp_fastopen_cookie foc = { .len = -1 };
5729         int saved_clamp = tp->rx_opt.mss_clamp;
5730         bool fastopen_fail;
5731 
5732         tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
5733         if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
5734                 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
5735 
5736         if (th->ack) {
5737                 /* rfc793:
5738                  * "If the state is SYN-SENT then
5739                  *    first check the ACK bit
5740                  *      If the ACK bit is set
5741                  *        If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
5742                  *        a reset (unless the RST bit is set, if so drop
5743                  *        the segment and return)"
5744                  */
5745                 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
5746                     after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
5747                         goto reset_and_undo;
5748 
5749                 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
5750                     !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
5751                              tcp_time_stamp(tp))) {
5752                         NET_INC_STATS(sock_net(sk),
5753                                         LINUX_MIB_PAWSACTIVEREJECTED);
5754                         goto reset_and_undo;
5755                 }
5756 
5757                 /* Now ACK is acceptable.
5758                  *
5759                  * "If the RST bit is set
5760                  *    If the ACK was acceptable then signal the user "error:
5761                  *    connection reset", drop the segment, enter CLOSED state,
5762                  *    delete TCB, and return."
5763                  */
5764 
5765                 if (th->rst) {
5766                         tcp_reset(sk);
5767                         goto discard;
5768                 }
5769 
5770                 /* rfc793:
5771                  *   "fifth, if neither of the SYN or RST bits is set then
5772                  *    drop the segment and return."
5773                  *
5774                  *    See note below!
5775                  *                                        --ANK(990513)
5776                  */
5777                 if (!th->syn)
5778                         goto discard_and_undo;
5779 
5780                 /* rfc793:
5781                  *   "If the SYN bit is on ...
5782                  *    are acceptable then ...
5783                  *    (our SYN has been ACKed), change the connection
5784                  *    state to ESTABLISHED..."
5785                  */
5786 
5787                 tcp_ecn_rcv_synack(tp, th);
5788 
5789                 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
5790                 tcp_ack(sk, skb, FLAG_SLOWPATH);
5791 
5792                 /* Ok.. it's good. Set up sequence numbers and
5793                  * move to established.
5794                  */
5795                 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
5796                 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
5797 
5798                 /* RFC1323: The window in SYN & SYN/ACK segments is
5799                  * never scaled.
5800                  */
5801                 tp->snd_wnd = ntohs(th->window);
5802 
5803                 if (!tp->rx_opt.wscale_ok) {
5804                         tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
5805                         tp->window_clamp = min(tp->window_clamp, 65535U);
5806                 }
5807 
5808                 if (tp->rx_opt.saw_tstamp) {
5809                         tp->rx_opt.tstamp_ok       = 1;
5810                         tp->tcp_header_len =
5811                                 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
5812                         tp->advmss          -= TCPOLEN_TSTAMP_ALIGNED;
5813                         tcp_store_ts_recent(tp);
5814                 } else {
5815                         tp->tcp_header_len = sizeof(struct tcphdr);
5816                 }
5817 
5818                 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
5819                 tcp_initialize_rcv_mss(sk);
5820 
5821                 /* Remember, tcp_poll() does not lock socket!
5822                  * Change state from SYN-SENT only after copied_seq
5823                  * is initialized. */
5824                 tp->copied_seq = tp->rcv_nxt;
5825 
5826                 smc_check_reset_syn(tp);
5827 
5828                 smp_mb();
5829 
5830                 tcp_finish_connect(sk, skb);
5831 
5832                 fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
5833                                 tcp_rcv_fastopen_synack(sk, skb, &foc);
5834 
5835                 if (!sock_flag(sk, SOCK_DEAD)) {
5836                         sk->sk_state_change(sk);
5837                         sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
5838                 }
5839                 if (fastopen_fail)
5840                         return -1;
5841                 if (sk->sk_write_pending ||
5842                     icsk->icsk_accept_queue.rskq_defer_accept ||
5843                     icsk->icsk_ack.pingpong) {
5844                         /* Save one ACK. Data will be ready after
5845                          * several ticks, if write_pending is set.
5846                          *
5847                          * It could be removed, but with this feature tcpdumps
5848                          * look so _wonderfully_ clever that I could not
5849                          * resist the temptation 8)     --ANK
5850                          */
5851                         inet_csk_schedule_ack(sk);
5852                         tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
5853                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
5854                                                   TCP_DELACK_MAX, TCP_RTO_MAX);
5855 
5856 discard:
5857                         tcp_drop(sk, skb);
5858                         return 0;
5859                 } else {
5860                         tcp_send_ack(sk);
5861                 }
5862                 return -1;
5863         }
5864 
5865         /* No ACK in the segment */
5866 
5867         if (th->rst) {
5868                 /* rfc793:
5869                  * "If the RST bit is set
5870                  *
5871                  *      Otherwise (no ACK) drop the segment and return."
5872                  */
5873 
5874                 goto discard_and_undo;
5875         }
5876 
5877         /* PAWS check. */
5878         if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
5879             tcp_paws_reject(&tp->rx_opt, 0))
5880                 goto discard_and_undo;
5881 
5882         if (th->syn) {
5883                 /* We see a SYN without an ACK. It is an attempt at a
5884                  * simultaneous connect with crossed SYNs.
5885                  * In particular, it can be a connect to self.
5886                  */
5887                 tcp_set_state(sk, TCP_SYN_RECV);
5888 
5889                 if (tp->rx_opt.saw_tstamp) {
5890                         tp->rx_opt.tstamp_ok = 1;
5891                         tcp_store_ts_recent(tp);
5892                         tp->tcp_header_len =
5893                                 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
5894                 } else {
5895                         tp->tcp_header_len = sizeof(struct tcphdr);
5896                 }
5897 
5898                 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
5899                 tp->copied_seq = tp->rcv_nxt;
5900                 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
5901 
5902                 /* RFC1323: The window in SYN & SYN/ACK segments is
5903                  * never scaled.
5904                  */
5905                 tp->snd_wnd    = ntohs(th->window);
5906                 tp->snd_wl1    = TCP_SKB_CB(skb)->seq;
5907                 tp->max_window = tp->snd_wnd;
5908 
5909                 tcp_ecn_rcv_syn(tp, th);
5910 
5911                 tcp_mtup_init(sk);
5912                 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
5913                 tcp_initialize_rcv_mss(sk);
5914 
5915                 tcp_send_synack(sk);
5916 #if 0
5917                 /* Note: we could accept data and URG from this segment.
5918                  * There is nothing preventing it (except that we must
5919                  * either change tcp_recvmsg() to prevent it from returning data
5920                  * before the 3WHS completes per RFC793, or employ TCP Fast Open).
5921                  *
5922                  * However, if we sometimes ignore data in ACKless segments,
5923                  * there is no reason to accept it at other times.
5924                  * Also, the code doing this in step6 of tcp_rcv_state_process
5925                  * does not seem flawless. So, discard the packet for sanity.
5926                  * Uncomment this return to process the data.
5927                  */
5928                 return -1;
5929 #else
5930                 goto discard;
5931 #endif
5932         }
5933         /* "fifth, if neither of the SYN or RST bits is set then
5934          * drop the segment and return."
5935          */
5936 
5937 discard_and_undo:
5938         tcp_clear_options(&tp->rx_opt);
5939         tp->rx_opt.mss_clamp = saved_clamp;
5940         goto discard;
5941 
5942 reset_and_undo:
5943         tcp_clear_options(&tp->rx_opt);
5944         tp->rx_opt.mss_clamp = saved_clamp;
5945         return 1;
5946 }
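
/*
 * Illustrative sketch (editor's addition, not part of tcp_input.c): the
 * RFC 793 SYN-SENT acceptability test applied at the top of
 * tcp_rcv_synsent_state_process() above.  An ACK is usable only if it falls
 * in (SND.UNA, SND.NXT]; anything outside that range leads to
 * reset_and_undo (unless the segment itself carries RST).  Standalone
 * userspace C; sketch_after() re-creates the kernel's wrap-safe sequence
 * comparison and the other names are made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool sketch_after(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) > 0;
}

static bool sketch_synsent_ack_ok(uint32_t ack_seq, uint32_t snd_una,
                                  uint32_t snd_nxt)
{
        return sketch_after(ack_seq, snd_una) && !sketch_after(ack_seq, snd_nxt);
}

int main(void)
{
        /* ISN 1000, one SYN sent -> SND.NXT 1001: only an ACK of 1001 is OK */
        printf("%d %d\n", sketch_synsent_ack_ok(1001, 1000, 1001),      /* 1 */
                          sketch_synsent_ack_ok(1000, 1000, 1001));     /* 0 */
        return 0;
}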
5947 
5948 /*
5949  *      This function implements the receiving procedure of RFC 793 for
5950  *      all states except ESTABLISHED and TIME_WAIT.
5951  *      It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
5952  *      address independent.
5953  */
5954 
5955 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
5956 {
5957         struct tcp_sock *tp = tcp_sk(sk);
5958         struct inet_connection_sock *icsk = inet_csk(sk);
5959         const struct tcphdr *th = tcp_hdr(skb);
5960         struct request_sock *req;
5961         int queued = 0;
5962         bool acceptable;
5963 
5964         switch (sk->sk_state) {
5965         case TCP_CLOSE:
5966                 goto discard;
5967 
5968