TOMOYO Linux Cross Reference
Linux/net/ipv4/tcp_recovery.c


// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

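/* Mark @skb lost. If it was a retransmission that is now lost again,
 * clear its retransmitted status, remove it from retrans_out and account
 * the loss in LINUX_MIB_TCPLOSTRETRANSMIT.
 */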
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_skb_mark_lost_uncond_verify(tp, skb);
        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                /* Account for retransmits that are lost again */
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                tp->retrans_out -= tcp_skb_pcount(skb);
                NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
                              tcp_skb_pcount(skb));
        }
}

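/* Returns true if the packet stamped (t1, seq1) was sent after the one
 * stamped (t2, seq2). Send timestamps can be equal (e.g. for packets in
 * the same TSO burst), so ties are broken by the higher end sequence.
 */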
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
        return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tp->rack.reord) {
                /* If reordering has not been observed, be aggressive
                 * during recovery or when starting recovery via the
                 * DUPACK threshold.
                 */
                if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
                        return 0;

                if (tp->sacked_out >= tp->reordering &&
                    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
                        return 0;
        }

        /* To be more resilient to reordering, allow a min_rtt/4 settling
         * delay. Use min_rtt instead of the smoothed RTT because reordering
         * is often a path property and less related to queuing or delayed
         * ACKs. Upon receiving DSACKs, linearly increase the window up to
         * the smoothed RTT.
         */
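        /* Illustrative example (hypothetical values): with min_rtt = 40ms
         * and reo_wnd_steps = 2, the candidate window is (40ms / 4) * 2 =
         * 20ms; with a smoothed RTT of 25ms (srtt_us stores 8 * srtt), the
         * result is min(20ms, 25ms) = 20ms.
         */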
        return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
                   tp->srtt_us >> 3);
}

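/* Returns the time, in microseconds, remaining before @skb may be deemed
 * lost: positive means the RACK timeout (the measured RTT plus the
 * reordering window) has not yet elapsed since @skb was (re)sent; zero or
 * negative means it has.
 */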
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
        return tp->rack.rtt_us + reo_wnd -
               tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but the three look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb, *n;
        u32 reo_wnd;

        *reo_timeout = 0;
        reo_wnd = tcp_rack_reo_wnd(sk);
        list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
                                 tcp_tsorted_anchor) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
                s32 remaining;

                /* Skip ones marked lost but not yet retransmitted */
                if ((scb->sacked & TCPCB_LOST) &&
                    !(scb->sacked & TCPCB_SACKED_RETRANS))
                        continue;

                if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
                                         tp->rack.end_seq, scb->end_seq))
                        break;

                /* A packet is lost if it has not been s/acked beyond
                 * the recent RTT plus the reordering window.
                 */
                remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
                if (remaining <= 0) {
                        tcp_mark_skb_lost(sk, skb);
                        list_del_init(&skb->tcp_tsorted_anchor);
                } else {
                        /* Record maximum wait time */
                        *reo_timeout = max_t(u32, *reo_timeout, remaining);
                }
        }
}
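
/* Illustrative timeline for tcp_rack_detect_loss() (hypothetical values):
 * P1 is sent at t = 0ms, P2 at t = 10ms, and a SACK for P2 arrives at
 * t = 50ms, making rack.rtt_us = 40ms. P1's remaining wait is then
 * rtt + reo_wnd - elapsed = 40 + reo_wnd - 50: with reo_wnd = 5ms it is
 * -5ms and P1 is marked lost at once; with reo_wnd = 15ms it is +5ms and
 * a 5ms reordering timer is armed instead.
 */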

void tcp_rack_mark_lost(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout;

        if (!tp->rack.advanced)
                return;

        /* Reset the advanced flag to avoid unnecessary queue scanning */
        tp->rack.advanced = 0;
        tcp_rack_detect_loss(sk, &timeout);
        if (timeout) {
                timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
                                          timeout, inet_csk(sk)->icsk_rto);
        }
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt.
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                      u64 xmit_time)
{
        u32 rtt_us;

        rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
        if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or a prior
                 * retransmission) was sacked.
                 *
                 * If the original is lost, there is no ambiguity. Otherwise
                 * we assume the original can be delayed up to aRTT + min_rtt.
                 * The aRTT term is bounded by the fast recovery or timeout,
                 * so it's at least one RTT (i.e., the retransmission is at
                 * least an RTT later).
                 */
                return;
        }
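        /* Illustrative example (hypothetical values): with min_rtt = 40ms,
         * a (s)ack arriving 5ms after a retransmission yields rtt_us = 5ms,
         * which is below min_rtt, so it is ignored above: the receiver
         * almost certainly (s)acked the original transmission instead.
         */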
        tp->rack.advanced = 1;
        tp->rack.rtt_us = rtt_us;
        if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
                                end_seq, tp->rack.end_seq)) {
                tp->rack.mstamp = xmit_time;
                tp->rack.end_seq = end_seq;
        }
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout, prior_inflight;

        prior_inflight = tcp_packets_in_flight(tp);
        tcp_rack_detect_loss(sk, &timeout);
        if (prior_inflight != tcp_packets_in_flight(tp)) {
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
                        tcp_enter_recovery(sk, false);
                        if (!inet_csk(sk)->icsk_ca_ops->cong_control)
                                tcp_cwnd_reduction(sk, 1, 0);
                }
                tcp_xmit_retransmit_queue(sk);
        }
        if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
                tcp_rearm_rto(sk);
}

/* Updates RACK's reo_wnd based on DSACKs and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since a spurious retransmission may have been caused by a
 * reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (this accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that any new DSACK
 * being reacted to is (approximately) due to a spurious retransmission
 * sent after reo_wnd was last updated.
 *
 * reo_wnd is tracked in steps of min_rtt/4, rather than as an absolute
 * value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
            !rs->prior_delivered)
                return;

        /* Disregard DSACK if an RTT has not passed since we adjusted reo_wnd */
        if (before(rs->prior_delivered, tp->rack.last_delivered))
                tp->rack.dsack_seen = 0;

        /* Adjust the reo_wnd if update is pending */
        if (tp->rack.dsack_seen) {
                tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
                                               tp->rack.reo_wnd_steps + 1);
                tp->rack.dsack_seen = 0;
                tp->rack.last_delivered = tp->delivered;
                tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
        } else if (!tp->rack.reo_wnd_persist) {
                tp->rack.reo_wnd_steps = 1;
        }
}
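
/* Illustrative example for tcp_rack_update_reo_wnd() (hypothetical values):
 * each DSACK-triggered update widens reo_wnd by one min_rtt/4 step
 * (1 -> 2 -> 3, capped at 0xFF) and re-arms reo_wnd_persist to 16
 * recoveries; once 16 recoveries complete without a new DSACK,
 * reo_wnd_steps falls back to the default single step.
 */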

/* RFC6582 NewReno recovery for non-SACK connections. It marks the next
 * unacked packet lost, so that it is retransmitted, upon receiving
 * a) three or more DUPACKs to start fast recovery
 * b) an ACK acknowledging new data during fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
        const u8 state = inet_csk(sk)->icsk_ca_state;
        struct tcp_sock *tp = tcp_sk(sk);

        if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
            (state == TCP_CA_Recovery && snd_una_advanced)) {
                struct sk_buff *skb = tcp_rtx_queue_head(sk);
                u32 mss;

                if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
                        return;

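                /* A TSO skb covers multiple MSS-sized segments; split off
                 * the first MSS so only a single segment is marked lost
                 * and retransmitted here, in line with NewReno's
                 * one-packet-at-a-time recovery.
                 */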
                mss = tcp_skb_mss(skb);
                if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
                        tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
                                     mss, mss, GFP_ATOMIC);

                tcp_skb_mark_lost_uncond_verify(tp, skb);
        }
}
