
TOMOYO Linux Cross Reference
Linux/net/ipv4/tcp_minisocks.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

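/* Acceptability test for an incoming segment against the receive window
 * (s_win through e_win): accept a segment starting at the left edge, any
 * segment overlapping the window, or a zero-length segment sitting exactly
 * at the right edge.
 */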
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return true;
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
                                  const struct sk_buff *skb, int mib_idx)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

        if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
                                  &tcptw->tw_last_oow_ack_time)) {
                /* Send ACK. Note that we do not put the bucket;
                 * it will be released by the caller.
                 */
                return TCP_TW_ACK;
        }

        /* We are rate-limiting, so just release the tw sock and drop skb. */
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of TIME-WAIT state is to close a connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it
 *   exceeds the maximal retransmission timeout by enough to allow for the
 *   loss of one (or more) segments sent by the peer and of our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, this means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th)
{
        struct tcp_options_received tmp_opt;
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        bool paws_reject = false;

        tmp_opt.saw_tstamp = 0;
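        /* th->doff is in 32-bit words, so anything larger than a bare
         * 20-byte header means TCP options are present; parse them only
         * if we also have a cached peer timestamp to compare against.
         */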
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
                tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
                        tmp_opt.ts_recent       = tcptw->tw_ts_recent;
                        tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        if (tw->tw_substate == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   tcptw->tw_rcv_nxt,
                                   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
                        return tcp_timewait_check_oow_rate_limit(
                                tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

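                /* An RST in FIN-WAIT-2 jumps to the kill: label inside the
                 * RST handling further down, which deschedules the timewait
                 * sock and drops its reference.
                 */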
                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
                        return TCP_TW_RST;

                /* Dup ACK? */
                if (!th->ack ||
                    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrives after the half-duplex
                 * close, reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
                        return TCP_TW_RST;

                /* FIN arrived, enter true time-wait state. */
                tw->tw_substate   = TCP_TIME_WAIT;
                tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }

                inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }

        /*
         *      Now real TIME-WAIT state.
         *
         *      RFC 1122:
         *      "When a connection is [...] on TIME-WAIT state [...]
         *      [a TCP] MAY accept a new SYN from the remote TCP to
         *      reopen the connection directly, if it:
         *
         *      (1)  assigns its initial sequence number for the new
         *      connection to be larger than the largest sequence
         *      number it used on the previous connection incarnation,
         *      and
         *
         *      (2)  returns to TIME-WAIT state if the SYN turns out
         *      to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* An in-window segment may only be a reset or a bare ACK. */

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                } else {
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }

                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                        tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                }

                inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.

           All such segments are ACKed immediately.

           The only exception is a new SYN. We accept it, if it is
           not an old duplicate and we are in no danger of being killed
           by delayed old duplicates. The RFC check (that it carries a
           newer sequence number) works at rates < 40Mbit/sec.
           However, if PAWS works, it is reliable; even more,
           we may relax the silly seq-space cutoff.

           RED-PEN: we violate the main RFC requirement: if this SYN turns
           out to be an old duplicate (i.e. we receive an RST in reply to
           the SYN-ACK), we must return the socket to time-wait state. That
           is not good, but not fatal yet.
         */

        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
             (tmp_opt.saw_tstamp &&
              (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
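                /* RFC 1122 rule (1) above: choose an ISN above anything the
                 * old incarnation can have used (snd_nxt plus a maximal
                 * 65535 window).  tcp_tw_isn == 0 means "no timewait ISN",
                 * so 0 itself is skipped.
                 */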
                u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
                if (isn == 0)
                        isn++;
                TCP_SKB_CB(skb)->tcp_tw_isn = isn;
                return TCP_TW_SYN;
        }

        if (paws_reject)
                __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * An ACKless SYN may be either an old duplicate or a new,
                 * good SYN with a random sequence number < rcv_nxt.
                 * Do not reschedule in the latter case.
                 */
                if (paws_reject || th->ack)
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

                return tcp_timewait_check_oow_rate_limit(
                        tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
        }
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct inet_timewait_sock *tw;
        struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

        tw = inet_twsk_alloc(sk, tcp_death_row, state);

        if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
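                /* (rto << 2) - (rto >> 1) == 3.5 * icsk_rto: a floor for the
                 * timewait interval chosen below.
                 */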
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
                struct inet_sock *inet = inet_sk(sk);

                tw->tw_transparent      = inet->transparent;
                tw->tw_mark             = sk->sk_mark;
                tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
                tcptw->tw_rcv_nxt       = tp->rcv_nxt;
                tcptw->tw_snd_nxt       = tp->snd_nxt;
                tcptw->tw_rcv_wnd       = tcp_receive_window(tp);
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
                tcptw->tw_ts_offset     = tp->tsoffset;
                tcptw->tw_last_oow_ack_time = 0;
                tcptw->tw_tx_delay      = tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
                        tw->tw_txhash = sk->sk_txhash;
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
#endif

#ifdef CONFIG_TCP_MD5SIG
                /*
                 * The timewait bucket does not have the key DB from the
                 * sock structure. We just make a quick copy of the
                 * md5 key being used (if indeed we are using one)
                 * so the timewait ACK-generating code has the key.
                 */
                do {
                        tcptw->tw_md5_key = NULL;
                        if (static_branch_unlikely(&tcp_md5_needed)) {
                                struct tcp_md5sig_key *key;

                                key = tp->af_specific->md5_lookup(sk, sk);
                                if (key) {
                                        tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                                        BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
                                }
                        }
                } while (0);
#endif

                /* Get the TIME_WAIT timeout firing: never shorter than
                 * 3.5*RTO, and exactly TCP_TIMEWAIT_LEN for true TIME-WAIT.
                 */
                if (timeo < rto)
                        timeo = rto;

                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;

                /* tw_timer is pinned, so we need to make sure BHs are disabled
                 * in the following section, otherwise the timer handler could
                 * run before we complete the initialization.
                 */
                local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates.
                 * Note that access to tw after this point is illegal.
                 */
                inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        if (static_branch_unlikely(&tcp_md5_needed)) {
                struct tcp_timewait_sock *twsk = tcp_twsk(sk);

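                /* tw_md5_key may be dereferenced by paths running under
                 * rcu_read_lock(), so defer the actual free past a grace
                 * period instead of kfree()ing it directly.
                 */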
                if (twsk->tw_md5_key)
                        kfree_rcu(twsk->tw_md5_key, rcu);
        }
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
                           const struct sock *sk_listener,
                           const struct dst_entry *dst)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk_listener);
        int full_space = tcp_full_space(sk_listener);
        u32 window_clamp;
        __u8 rcv_wscale;
        u32 rcv_wnd;
        int mss;

        mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
        window_clamp = READ_ONCE(tp->window_clamp);
        /* Set this up on the first call only */
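        /* GNU "?:" shorthand: use the listener's window_clamp when it is
         * nonzero, otherwise fall back to the route's RTAX_WINDOW metric.
         */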
        req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

        /* limit the window selection if the user enforces a smaller rx buffer */
        if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
            (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
                req->rsk_window_clamp = full_space;

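        /* An attached BPF sock_ops program may supply the initial receive
         * window; a return of 0 means none did, so fall back to the route's
         * RTAX_INITRWND metric.
         */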
        rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
        if (rcv_wnd == 0)
                rcv_wnd = dst_metric(dst, RTAX_INITRWND);
        else if (full_space < rcv_wnd * mss)
                full_space = rcv_wnd * mss;

        /* Use tcp_full_space() because this is guaranteed to be the first packet */
        tcp_select_initial_window(sk_listener, full_space,
                mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                &req->rsk_rcv_wnd,
                &req->rsk_window_clamp,
                ireq->wscale_ok,
                &rcv_wscale,
                rcv_wnd);
        ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
                                  const struct request_sock *req)
{
        tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
        bool ca_got_dst = false;

        if (ca_key != TCP_CA_UNSPEC) {
                const struct tcp_congestion_ops *ca;

                rcu_read_lock();
                ca = tcp_ca_find_key(ca_key);
                if (likely(ca && try_module_get(ca->owner))) {
                        icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
                        icsk->icsk_ca_ops = ca;
                        ca_got_dst = true;
                }
                rcu_read_unlock();
        }

        /* If no valid choice has been made yet, assign the current system
         * default congestion control algorithm.
         */
        if (!ca_got_dst &&
            (!icsk->icsk_ca_setsockopt ||
             !try_module_get(icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);

        tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
                                    struct request_sock *req,
                                    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
        struct inet_request_sock *ireq;

        if (static_branch_unlikely(&tcp_have_smc)) {
                ireq = inet_rsk(req);
                if (oldtp->syn_smc && !ireq->smc_ok)
                        newtp->syn_smc = 0;
        }
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here: the tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb)
{
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct tcp_request_sock *treq = tcp_rsk(req);
        struct inet_connection_sock *newicsk;
        struct tcp_sock *oldtp, *newtp;

        if (!newsk)
                return NULL;

        newicsk = inet_csk(newsk);
        newtp = tcp_sk(newsk);
        oldtp = tcp_sk(sk);

        smc_check_reset_syn_req(oldtp, req, newtp);

        /* Now setup tcp_sock */
        newtp->pred_flags = 0;

        newtp->rcv_wup = newtp->copied_seq =
        newtp->rcv_nxt = treq->rcv_isn + 1;
        newtp->segs_in = 1;

        newtp->snd_sml = newtp->snd_una =
        newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

        INIT_LIST_HEAD(&newtp->tsq_node);
        INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

        tcp_init_wl(newtp, treq->rcv_isn);

        minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
        newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

        newtp->lsndtime = tcp_jiffies32;
        newsk->sk_txhash = treq->txhash;
        newtp->total_retrans = req->num_retrans;

        tcp_init_xmit_timers(newsk);
        newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

        if (sock_flag(newsk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(newsk,
                                               keepalive_time_when(newtp));

        newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
        newtp->rx_opt.sack_ok = ireq->sack_ok;
        newtp->window_clamp = req->rsk_window_clamp;
        newtp->rcv_ssthresh = req->rsk_rcv_wnd;
        newtp->rcv_wnd = req->rsk_rcv_wnd;
        newtp->rx_opt.wscale_ok = ireq->wscale_ok;
        if (newtp->rx_opt.wscale_ok) {
                newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
        } else {
                newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
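                /* Without window scaling the advertised window is limited to
                 * the 16-bit header field, so the clamp cannot exceed 65535.
                 */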
                newtp->window_clamp = min(newtp->window_clamp, 65535U);
        }
        newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
        newtp->max_window = newtp->snd_wnd;

        if (newtp->rx_opt.tstamp_ok) {
                newtp->rx_opt.ts_recent = req->ts_recent;
                newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
                newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
        } else {
                newtp->rx_opt.ts_recent_stamp = 0;
                newtp->tcp_header_len = sizeof(struct tcphdr);
        }
        if (req->num_timeout) {
                newtp->undo_marker = treq->snt_isn;
                newtp->retrans_stamp = div_u64(treq->snt_synack,
                                               USEC_PER_SEC / TCP_TS_HZ);
        }
        newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
        newtp->md5sig_info = NULL;      /*XXX*/
        if (newtp->af_specific->md5_lookup(sk, newsk))
                newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
        if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
        newtp->rx_opt.mss_clamp = req->mss;
        tcp_ecn_openreq_child(newtp, req);
        newtp->fastopen_req = NULL;
        newtp->fastopen_rsk = NULL;

        __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

        return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ACK
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           bool fastopen, bool *req_stolen)
{
        struct tcp_options_received tmp_opt;
        struct sock *child;
        const struct tcphdr *th = tcp_hdr(skb);
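        /* tcp_flag_word() is the 32-bit header word that holds the flag bits;
         * mask it down to just RST, SYN and ACK for the checks below.
         */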
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool paws_reject = false;
        bool own_req;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
                tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
                        /* We do not store the true timestamp, but that is not
                         * required; it can be estimated (approximately)
                         * from other data.
                         */
                        tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
                 * RFC 793 depicts this case (incorrectly! it was fixed in
                 * RFC 1122) in figures 6 and 8, but the formal protocol
                 * description says NOTHING. To be more exact, it says that
                 * we should send an ACK, because this segment (at least, if
                 * it has no data) is out of window.
                 *
                 *  CONCLUSION: RFC 793 (even with RFC 1122) DOES NOT
                 *  describe the SYN-RECV state. All of the description
                 *  is wrong; we cannot trust it and should rely only on
                 *  common sense and implementation experience.
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC 793, fixed by RFC 1122.
                 *
                 * Note that even if there is new data in the SYN packet
                 * it will be thrown away too.
                 *
                 * Reset the timer after retransmitting the SYNACK, similar
                 * to the idea of fast retransmit in recovery.
                 */
                if (!tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time) &&

                    !inet_rtx_syn_ack(sk, req)) {
                        unsigned long expires = jiffies;

                        expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
                                       TCP_RTO_MAX);
                        if (!fastopen)
                                mod_timer_pending(&req->rsk_timer, expires);
                        else
                                req->rsk_timer.expires = expires;
                }
                return NULL;
        }

        /* Further reproduces the section "SEGMENT ARRIVES"
           for the SYN-RECEIVED state of RFC 793.
           It is broken; however, it fails only when SYNs are crossed.

           You would think that SYN crossing is impossible here, since
           we should have a SYN_SENT socket (from connect()) on our end,
           but this is not true if the crossed SYNs were sent to both
           ends by a malicious third party.  We must defend against this,
           and to do that we first verify the ACK (as per RFC793, page
           36) and reset if it is invalid.  Is this a true full defense?
           To convince ourselves, let us consider a way in which the ACK
           test can still pass in this 'malicious crossed SYNs' case.
           Malicious sender sends identical SYNs (and thus identical sequence
           numbers) to both A and B:

                A: gets SYN, seq=7
                B: gets SYN, seq=7

           By our good fortune, both A and B select the same initial
           send sequence number of seven :-)

                A: sends SYN|ACK, seq=7, ack_seq=8
                B: sends SYN|ACK, seq=7, ack_seq=8

           So we are now A eating this SYN|ACK, and the ACK test passes.  So
           does the sequence test; the SYN is truncated, and thus we consider
           it a bare ACK.

           If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
           bare ACK.  Otherwise, we create an established connection.  Both
           ends (listening sockets) accept the new incoming connection and try
           to talk to each other. 8-)

           Note: This case is both harmless and rare.  The possibility is
           about the same as us discovering intelligent life on another
           planet tomorrow.

           But generally, we should (the RFC lies!) accept an ACK
           from a SYNACK both here and in tcp_rcv_state_process().
           tcp_rcv_state_process() does not, hence, we do not either.

           Note that the case is absolutely generic:
           we cannot optimize anything here without
           violating the protocol. All the checks must be made
           before an attempt to create a socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         *                  and the incoming segment acknowledges something not yet
         *                  sent (the segment carries an unacceptable ACK) ...
         *                  a reset is sent."
         *
         * Invalid ACK: reset will be sent by listening socket.
         * Note that the ACK validity check for a Fast Open socket is done
         * elsewhere and is checked directly against the child socket rather
         * than req because user data may have been sent out.
         */
        if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq !=
             tcp_rsk(req)->snt_isn + 1))
                return sk;

        /* Also, it would not be a bad idea to check rcv_tsecr, which
         * is essentially an ACK extension; too-early or too-late values
         * should cause a reset in unsynchronized states.
         */

        /* RFC793: "first check sequence number". */

        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                          tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST) &&
                    !tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time))
                        req->rsk_ops->send_ack(sk, skb, req);
                if (paws_reject)
                        __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }

        /* In sequence, PAWS is OK. */

        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
                req->ts_recent = tmp_opt.rcv_tsval;

        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                /* Truncate SYN, it is out of window starting
                   at tcp_rsk(req)->rcv_isn + 1. */
                flg &= ~TCP_FLAG_SYN;
        }

        /* RFC793: "second check the RST bit" and
         *         "fourth, check the SYN bit"
         */
        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
                __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
                goto embryonic_reset;
        }

        /* ACK sequence verified above, just make sure ACK is
         * set.  If ACK not set, just silently drop the packet.
         *
         * XXX (TFO) - if we ever allow "data after SYN", the
         * following check needs to be removed.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;

        /* For Fast Open no more processing is needed (sk is the
         * child socket).
         */
        if (fastopen)
                return sk;

        /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
        if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
                return NULL;
        }

        /* OK, the ACK is valid: create the full socket and
         * feed this segment to it. It will repeat all
         * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
         * ESTABLISHED STATE. If it gets dropped after the
         * socket is created, expect trouble.
         */
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         req, &own_req);
        if (!child)
                goto listen_overflow;

        sock_rps_save_rxhash(child, skb);
        tcp_synack_rtt_meas(child, req);
        *req_stolen = !own_req;
        return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
        if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

embryonic_reset:
        if (!(flg & TCP_FLAG_RST)) {
                /* Received a bad SYN pkt - for TFO we try not to reset
                 * the local connection unless it's really necessary, to
                 * avoid becoming vulnerable to an outside attack aiming at
                 * resetting legit local connections.
                 */
                req->rsk_ops->send_reset(sk, skb);
        } else if (fastopen) { /* received a valid RST pkt */
                reqsk_fastopen_remove(sk, req, true);
                tcp_reset(sk);
        }
        if (!fastopen) {
                inet_csk_reqsk_queue_drop(sk, req);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
        }
        return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb)
{
        int ret = 0;
        int state = child->sk_state;

        /* record NAPI ID of child */
        sk_mark_napi_id(child, skb);

        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb);
                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent);
        } else {
                /* Alas, it is possible again, because we do the lookup
                 * in the main socket hash table and the lock on the
                 * listening socket no longer protects us.
                 */
                __sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}
EXPORT_SYMBOL(tcp_child_process);
