
TOMOYO Linux Cross Reference
Linux/net/ipv4/tcp_ipv4.c


  1 /*
  2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
  3  *              operating system.  INET is implemented using the  BSD Socket
  4  *              interface as the means of communication with the user level.
  5  *
  6  *              Implementation of the Transmission Control Protocol(TCP).
  7  *
  8  *              IPv4 specific functions
  9  *
 10  *
 11  *              code split from:
 12  *              linux/ipv4/tcp.c
 13  *              linux/ipv4/tcp_input.c
 14  *              linux/ipv4/tcp_output.c
 15  *
 16  *              See tcp.c for author information
 17  *
 18  *      This program is free software; you can redistribute it and/or
 19  *      modify it under the terms of the GNU General Public License
 20  *      as published by the Free Software Foundation; either version
 21  *      2 of the License, or (at your option) any later version.
 22  */
 23 
 24 /*
 25  * Changes:
 26  *              David S. Miller :       New socket lookup architecture.
 27  *                                      This code is dedicated to John Dyson.
 28  *              David S. Miller :       Change semantics of established hash,
 29  *                                      half is devoted to TIME_WAIT sockets
 30  *                                      and the rest go in the other half.
 31  *              Andi Kleen :            Add support for syncookies and fixed
 32  *                                      some bugs: ip options weren't passed to
 33  *                                      the TCP layer, missed a check for an
 34  *                                      ACK bit.
 35  *              Andi Kleen :            Implemented fast path mtu discovery.
 36  *                                      Fixed many serious bugs in the
 37  *                                      request_sock handling and moved
 38  *                                      most of it into the af independent code.
 39  *                                      Added tail drop and some other bugfixes.
 40  *                                      Added new listen semantics.
 41  *              Mike McLagan    :       Routing by source
 42  *      Juan Jose Ciarlante:            ip_dynaddr bits
 43  *              Andi Kleen:             various fixes.
 44  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
 45  *                                      coma.
 46  *      Andi Kleen              :       Fix new listen.
 47  *      Andi Kleen              :       Fix accept error reporting.
 48  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 49  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 50  *                                      a single port at the same time.
 51  */
 52 
 53 #define pr_fmt(fmt) "TCP: " fmt
 54 
 55 #include <linux/bottom_half.h>
 56 #include <linux/types.h>
 57 #include <linux/fcntl.h>
 58 #include <linux/module.h>
 59 #include <linux/random.h>
 60 #include <linux/cache.h>
 61 #include <linux/jhash.h>
 62 #include <linux/init.h>
 63 #include <linux/times.h>
 64 #include <linux/slab.h>
 65 
 66 #include <net/net_namespace.h>
 67 #include <net/icmp.h>
 68 #include <net/inet_hashtables.h>
 69 #include <net/tcp.h>
 70 #include <net/transp_v6.h>
 71 #include <net/ipv6.h>
 72 #include <net/inet_common.h>
 73 #include <net/timewait_sock.h>
 74 #include <net/xfrm.h>
 75 #include <net/secure_seq.h>
 76 #include <net/tcp_memcontrol.h>
 77 #include <net/busy_poll.h>
 78 
 79 #include <linux/inet.h>
 80 #include <linux/ipv6.h>
 81 #include <linux/stddef.h>
 82 #include <linux/proc_fs.h>
 83 #include <linux/seq_file.h>
 84 
 85 #include <linux/crypto.h>
 86 #include <linux/scatterlist.h>
 87 
 88 int sysctl_tcp_tw_reuse __read_mostly;
 89 int sysctl_tcp_low_latency __read_mostly;
 90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
 91 
 92 #ifdef CONFIG_TCP_MD5SIG
 93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 94                                __be32 daddr, __be32 saddr, const struct tcphdr *th);
 95 #endif
 96 
 97 struct inet_hashinfo tcp_hashinfo;
 98 EXPORT_SYMBOL(tcp_hashinfo);
 99 
100 static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102         return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103                                           ip_hdr(skb)->saddr,
104                                           tcp_hdr(skb)->dest,
105                                           tcp_hdr(skb)->source);
106 }
107 
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111         struct tcp_sock *tp = tcp_sk(sk);
112 
113         /* With PAWS, it is safe from the viewpoint
114            of data integrity. Even without PAWS it is safe provided sequence
115            spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
116 
117            Actually, the idea is close to VJ's: only the timestamp cache is
118            held not per host but per port pair, and the TW bucket is used as
119            the state holder.
120 
121            If the TW bucket has already been destroyed we fall back to VJ's
122            scheme and use the initial timestamp retrieved from the peer table.
123          */
124         if (tcptw->tw_ts_recent_stamp &&
125             (!twp || (sysctl_tcp_tw_reuse &&
126                              get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127                 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128                 if (tp->write_seq == 0)
129                         tp->write_seq = 1;
130                 tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
131                 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132                 sock_hold(sktw);
133                 return 1;
134         }
135 
136         return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
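
A side note on the sysctl guarding this path: tcp_twsk_unique() will only recycle a
TIME-WAIT port pair when sysctl_tcp_tw_reuse is non-zero (and the stored timestamp is
more than a second old, so the PAWS-style check above stays safe). As a rough,
illustrative sketch, assuming the usual procfs path for this sysctl, the knob can be
flipped from user space like this:

    #include <stdio.h>

    /* Illustration only: enable net.ipv4.tcp_tw_reuse via procfs so that
     * tcp_twsk_unique() may let a new connect() reuse a TIME-WAIT port pair. */
    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");

            if (!f) {
                    perror("tcp_tw_reuse");
                    return 1;
            }
            fputs("1\n", f);
            fclose(f);
            return 0;
    }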
139 
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144         struct inet_sock *inet = inet_sk(sk);
145         struct tcp_sock *tp = tcp_sk(sk);
146         __be16 orig_sport, orig_dport;
147         __be32 daddr, nexthop;
148         struct flowi4 *fl4;
149         struct rtable *rt;
150         int err;
151         struct ip_options_rcu *inet_opt;
152 
153         if (addr_len < sizeof(struct sockaddr_in))
154                 return -EINVAL;
155 
156         if (usin->sin_family != AF_INET)
157                 return -EAFNOSUPPORT;
158 
159         nexthop = daddr = usin->sin_addr.s_addr;
160         inet_opt = rcu_dereference_protected(inet->inet_opt,
161                                              sock_owned_by_user(sk));
162         if (inet_opt && inet_opt->opt.srr) {
163                 if (!daddr)
164                         return -EINVAL;
165                 nexthop = inet_opt->opt.faddr;
166         }
167 
168         orig_sport = inet->inet_sport;
169         orig_dport = usin->sin_port;
170         fl4 = &inet->cork.fl.u.ip4;
171         rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173                               IPPROTO_TCP,
174                               orig_sport, orig_dport, sk);
175         if (IS_ERR(rt)) {
176                 err = PTR_ERR(rt);
177                 if (err == -ENETUNREACH)
178                         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179                 return err;
180         }
181 
182         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183                 ip_rt_put(rt);
184                 return -ENETUNREACH;
185         }
186 
187         if (!inet_opt || !inet_opt->opt.srr)
188                 daddr = fl4->daddr;
189 
190         if (!inet->inet_saddr)
191                 inet->inet_saddr = fl4->saddr;
192         sk_rcv_saddr_set(sk, inet->inet_saddr);
193 
194         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195                 /* Reset inherited state */
196                 tp->rx_opt.ts_recent       = 0;
197                 tp->rx_opt.ts_recent_stamp = 0;
198                 if (likely(!tp->repair))
199                         tp->write_seq      = 0;
200         }
201 
202         if (tcp_death_row.sysctl_tw_recycle &&
203             !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204                 tcp_fetch_timewait_stamp(sk, &rt->dst);
205 
206         inet->inet_dport = usin->sin_port;
207         sk_daddr_set(sk, daddr);
208 
209         inet_csk(sk)->icsk_ext_hdr_len = 0;
210         if (inet_opt)
211                 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212 
213         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214 
215         /* Socket identity is still unknown (sport may be zero).
216          * However, we set the state to SYN-SENT and, without releasing the
217          * socket lock, select a source port, enter ourselves into the hash
218          * tables and complete initialization after this.
219          */
220         tcp_set_state(sk, TCP_SYN_SENT);
221         err = inet_hash_connect(&tcp_death_row, sk);
222         if (err)
223                 goto failure;
224 
225         sk_set_txhash(sk);
226 
227         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228                                inet->inet_sport, inet->inet_dport, sk);
229         if (IS_ERR(rt)) {
230                 err = PTR_ERR(rt);
231                 rt = NULL;
232                 goto failure;
233         }
234         /* OK, now commit destination to socket.  */
235         sk->sk_gso_type = SKB_GSO_TCPV4;
236         sk_setup_caps(sk, &rt->dst);
237 
238         if (!tp->write_seq && likely(!tp->repair))
239                 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240                                                            inet->inet_daddr,
241                                                            inet->inet_sport,
242                                                            usin->sin_port);
243 
244         inet->inet_id = tp->write_seq ^ jiffies;
245 
246         err = tcp_connect(sk);
247 
248         rt = NULL;
249         if (err)
250                 goto failure;
251 
252         return 0;
253 
254 failure:
255         /*
256          * This unhashes the socket and releases the local port,
257          * if necessary.
258          */
259         tcp_set_state(sk, TCP_CLOSE);
260         ip_rt_put(rt);
261         sk->sk_route_caps = 0;
262         inet->inet_dport = 0;
263         return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
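
From user space, tcp_v4_connect() is reached through an ordinary connect(2) on an
AF_INET stream socket; the checks at the top of the function are what yield EINVAL
for a short address length and EAFNOSUPPORT for a non-IPv4 family. A minimal,
illustrative caller (the address and port below are arbitrary examples):

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_in dst;
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }

            memset(&dst, 0, sizeof(dst));
            dst.sin_family = AF_INET;       /* anything else -> EAFNOSUPPORT */
            dst.sin_port   = htons(80);     /* example port */
            inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr); /* example address */

            /* A too-small address length would be rejected with EINVAL. */
            if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
                    perror("connect");

            close(fd);
            return 0;
    }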
266 
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED MTU indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if the socket was owned by the user
270  * at the time tcp_v4_err() was called to handle the ICMP message.
271  */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274         struct dst_entry *dst;
275         struct inet_sock *inet = inet_sk(sk);
276         u32 mtu = tcp_sk(sk)->mtu_info;
277 
278         dst = inet_csk_update_pmtu(sk, mtu);
279         if (!dst)
280                 return;
281 
282         /* Something is about to go wrong... Remember the soft error
283          * in case this connection is not able to recover.
284          */
285         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286                 sk->sk_err_soft = EMSGSIZE;
287 
288         mtu = dst_mtu(dst);
289 
290         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291             ip_sk_accept_pmtu(sk) &&
292             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293                 tcp_sync_mss(sk, mtu);
294 
295                 /* Resend the TCP packet because it's
296                  * clear that the old packet has been
297                  * dropped. This is the new "fast" path mtu
298                  * discovery.
299                  */
300                 tcp_simple_retransmit(sk);
301         } /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
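
How often this path actually shrinks the MSS depends on the socket's PMTU discovery
mode tested above (inet->pmtudisc). From user space that mode is set with the
IP_MTU_DISCOVER socket option, and the current path MTU of a connected socket can be
read back with IP_MTU; both are described in ip(7). A sketch with error handling
trimmed (the helper name is made up for illustration):

    #include <netinet/in.h>     /* IP_MTU_DISCOVER, IP_PMTUDISC_DO, IP_MTU */
    #include <stdio.h>
    #include <sys/socket.h>

    /* Assumes "fd" is an already-connected TCP socket. */
    static void show_path_mtu(int fd)
    {
            int val = IP_PMTUDISC_DO;       /* honour ICMP_FRAG_NEEDED, set DF */
            int mtu = 0;
            socklen_t len = sizeof(mtu);

            setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));

            /* After tcp_v4_mtu_reduced() has run, the cached route MTU shrinks
             * and this reports the new value. */
            if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
                    printf("path MTU: %d\n", mtu);
    }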
304 
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307         struct dst_entry *dst = __sk_dst_check(sk, 0);
308 
309         if (dst)
310                 dst->ops->redirect(dst, sk, skb);
311 }
312 
313 
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317         struct request_sock *req = inet_reqsk(sk);
318         struct net *net = sock_net(sk);
319 
320         /* ICMPs are not backlogged, hence we cannot get
321          * an established socket here.
322          */
323         WARN_ON(req->sk);
324 
325         if (seq != tcp_rsk(req)->snt_isn) {
326                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327                 reqsk_put(req);
328         } else {
329                 /*
330                  * Still in SYN_RECV, just remove it silently.
331                  * There is no good way to pass the error to the newly
332                  * created socket, and POSIX does not want network
333                  * errors returned from accept().
334                  */
335                 NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336                 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
337         }
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340 
341 /*
342  * This routine is called by the ICMP module when it gets some
343  * sort of error condition.  If err < 0 then the socket should
344  * be closed and the error returned to the user.  If err > 0
345  * it's just the icmp type << 8 | icmp code.  After adjustment,
346  * the header points to the first 8 bytes of the TCP header.  We
347  * need to find the appropriate port.
348  *
349  * The locking strategy used here is very "optimistic". When
350  * someone else accesses the socket, the ICMP is just dropped,
351  * and for some paths there is no check at all.
352  * A more general error queue that queues errors for later handling
353  * would probably be better.
354  *
355  */
356 
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359         const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360         struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361         struct inet_connection_sock *icsk;
362         struct tcp_sock *tp;
363         struct inet_sock *inet;
364         const int type = icmp_hdr(icmp_skb)->type;
365         const int code = icmp_hdr(icmp_skb)->code;
366         struct sock *sk;
367         struct sk_buff *skb;
368         struct request_sock *fastopen;
369         __u32 seq, snd_una;
370         __u32 remaining;
371         int err;
372         struct net *net = dev_net(icmp_skb->dev);
373 
374         sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375                                        th->dest, iph->saddr, ntohs(th->source),
376                                        inet_iif(icmp_skb));
377         if (!sk) {
378                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379                 return;
380         }
381         if (sk->sk_state == TCP_TIME_WAIT) {
382                 inet_twsk_put(inet_twsk(sk));
383                 return;
384         }
385         seq = ntohl(th->seq);
386         if (sk->sk_state == TCP_NEW_SYN_RECV)
387                 return tcp_req_err(sk, seq);
388 
389         bh_lock_sock(sk);
390         /* If too many ICMPs get dropped on busy
391          * servers this needs to be solved differently.
392          * We do take care of the PMTU discovery (RFC1191) special case:
393          * we can receive locally generated ICMP messages while the socket is held.
394          */
395         if (sock_owned_by_user(sk)) {
396                 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397                         NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398         }
399         if (sk->sk_state == TCP_CLOSE)
400                 goto out;
401 
402         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404                 goto out;
405         }
406 
407         icsk = inet_csk(sk);
408         tp = tcp_sk(sk);
409         /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
410         fastopen = tp->fastopen_rsk;
411         snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412         if (sk->sk_state != TCP_LISTEN &&
413             !between(seq, snd_una, tp->snd_nxt)) {
414                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415                 goto out;
416         }
417 
418         switch (type) {
419         case ICMP_REDIRECT:
420                 do_redirect(icmp_skb, sk);
421                 goto out;
422         case ICMP_SOURCE_QUENCH:
423                 /* Just silently ignore these. */
424                 goto out;
425         case ICMP_PARAMETERPROB:
426                 err = EPROTO;
427                 break;
428         case ICMP_DEST_UNREACH:
429                 if (code > NR_ICMP_UNREACH)
430                         goto out;
431 
432                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433                         /* We are not interested in TCP_LISTEN and open_requests
434                          * (SYN-ACKs sent out by Linux are always < 576 bytes, so
435                          * they should go through unfragmented).
436                          */
437                         if (sk->sk_state == TCP_LISTEN)
438                                 goto out;
439 
440                         tp->mtu_info = info;
441                         if (!sock_owned_by_user(sk)) {
442                                 tcp_v4_mtu_reduced(sk);
443                         } else {
444                                 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445                                         sock_hold(sk);
446                         }
447                         goto out;
448                 }
449 
450                 err = icmp_err_convert[code].errno;
451                 /* check if icmp_skb allows revert of backoff
452                  * (see draft-zimmermann-tcp-lcd) */
453                 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454                         break;
455                 if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
456                     !icsk->icsk_backoff || fastopen)
457                         break;
458 
459                 if (sock_owned_by_user(sk))
460                         break;
461 
462                 icsk->icsk_backoff--;
463                 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464                                                TCP_TIMEOUT_INIT;
465                 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466 
467                 skb = tcp_write_queue_head(sk);
468                 BUG_ON(!skb);
469 
470                 remaining = icsk->icsk_rto -
471                             min(icsk->icsk_rto,
472                                 tcp_time_stamp - tcp_skb_timestamp(skb));
473 
474                 if (remaining) {
475                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476                                                   remaining, TCP_RTO_MAX);
477                 } else {
478                         /* RTO revert clocked out retransmission.
479                          * Will retransmit now */
480                         tcp_retransmit_timer(sk);
481                 }
482 
483                 break;
484         case ICMP_TIME_EXCEEDED:
485                 err = EHOSTUNREACH;
486                 break;
487         default:
488                 goto out;
489         }
490 
491         switch (sk->sk_state) {
492         case TCP_SYN_SENT:
493         case TCP_SYN_RECV:
494                 /* Only in fast or simultaneous open. If a fast open socket is
495                  * already accepted it is treated as a connected one below.
496                  */
497                 if (fastopen && !fastopen->sk)
498                         break;
499 
500                 if (!sock_owned_by_user(sk)) {
501                         sk->sk_err = err;
502 
503                         sk->sk_error_report(sk);
504 
505                         tcp_done(sk);
506                 } else {
507                         sk->sk_err_soft = err;
508                 }
509                 goto out;
510         }
511 
512         /* If we've already connected we will keep trying
513          * until we time out, or the user gives up.
514          *
515          * rfc1122 4.2.3.9 allows us to treat as hard errors
516          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
517          * but it is obsoleted by pmtu discovery).
518          *
519          * Note that in the modern internet, where routing is unreliable
520          * and broken firewalls sit in every dark corner sending random
521          * errors ordered by their masters, even these two messages finally
522          * lose their original sense (even Linux sends invalid PORT_UNREACHs).
523          *
524          * Now we are in compliance with RFCs.
525          *                                                      --ANK (980905)
526          */
527 
528         inet = inet_sk(sk);
529         if (!sock_owned_by_user(sk) && inet->recverr) {
530                 sk->sk_err = err;
531                 sk->sk_error_report(sk);
532         } else  { /* Only an error on timeout */
533                 sk->sk_err_soft = err;
534         }
535 
536 out:
537         bh_unlock_sock(sk);
538         sock_put(sk);
539 }
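
The inet->recverr branch above is the kernel side of the IP_RECVERR socket option:
with it enabled, errors such as the ones translated here are queued for the
application instead of only surfacing as a soft error on timeout. A hedged sketch of
draining that error queue, assuming the usual <linux/errqueue.h> interface and a
socket that already has IP_RECVERR set:

    #include <linux/errqueue.h>     /* struct sock_extended_err, SO_EE_ORIGIN_ICMP */
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Hypothetical helper: "fd" is a connected TCP socket on which
     *     int on = 1;
     *     setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
     * has already been done. */
    static void drain_icmp_errors(int fd)
    {
            union {
                    char buf[512];
                    struct cmsghdr align;
            } control;
            struct msghdr msg;
            struct cmsghdr *cmsg;

            memset(&msg, 0, sizeof(msg));
            msg.msg_control = control.buf;
            msg.msg_controllen = sizeof(control.buf);

            if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                    return;

            for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                    if (cmsg->cmsg_level == IPPROTO_IP &&
                        cmsg->cmsg_type == IP_RECVERR) {
                            struct sock_extended_err *ee =
                                    (struct sock_extended_err *)CMSG_DATA(cmsg);

                            if (ee->ee_origin == SO_EE_ORIGIN_ICMP)
                                    printf("ICMP type %u code %u -> errno %u\n",
                                           ee->ee_type, ee->ee_code, ee->ee_errno);
                    }
            }
    }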
540 
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543         struct tcphdr *th = tcp_hdr(skb);
544 
545         if (skb->ip_summed == CHECKSUM_PARTIAL) {
546                 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547                 skb->csum_start = skb_transport_header(skb) - skb->head;
548                 skb->csum_offset = offsetof(struct tcphdr, check);
549         } else {
550                 th->check = tcp_v4_check(skb->len, saddr, daddr,
551                                          csum_partial(th,
552                                                       th->doff << 2,
553                                                       skb->csum));
554         }
555 }
556 
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560         const struct inet_sock *inet = inet_sk(sk);
561 
562         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
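
For reference, the value tcp_v4_check() produces is the standard Internet
ones'-complement checksum (RFC 1071) taken over the IPv4 pseudo-header (source
address, destination address, zero-padded protocol, TCP length) followed by the TCP
header and payload. A self-contained sketch of the same arithmetic, not the kernel's
optimized csum helpers:

    #include <stddef.h>
    #include <stdint.h>

    /* Fold the 32-bit accumulator into a 16-bit ones'-complement checksum. */
    static uint16_t fold_carries(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    /* saddr/daddr in host byte order (e.g. after ntohl()); "seg" is the TCP header
     * plus payload with th->check zeroed. The result goes into th->check after
     * htons(). */
    static uint16_t tcp4_checksum_sketch(uint32_t saddr, uint32_t daddr,
                                         const uint8_t *seg, size_t len)
    {
            uint32_t sum = 0;
            size_t i;

            /* Pseudo-header words: saddr, daddr, zero/protocol, TCP length. */
            sum += (saddr >> 16) + (saddr & 0xffff);
            sum += (daddr >> 16) + (daddr & 0xffff);
            sum += 6;                       /* IPPROTO_TCP */
            sum += (uint32_t)len;

            /* TCP header and payload as big-endian 16-bit words. */
            for (i = 0; i + 1 < len; i += 2)
                    sum += ((uint32_t)seg[i] << 8) | seg[i + 1];
            if (len & 1)
                    sum += (uint32_t)seg[len - 1] << 8;

            return fold_carries(sum);
    }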
565 
566 /*
567  *      This routine will send an RST to the other tcp.
568  *
569  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL, etc.)
570  *                    for a reset?
571  *      Answer: if a packet caused a RST, it was not meant for a socket
572  *              existing in our system; if it does match a socket,
573  *              it is just a duplicate segment or a bug in the other side's TCP.
574  *              So we build the reply based only on the parameters that
575  *              arrived with the segment.
576  *      Exception: precedence violation. We do not implement it in any case.
577  */
578 
579 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
580 {
581         const struct tcphdr *th = tcp_hdr(skb);
582         struct {
583                 struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585                 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587         } rep;
588         struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590         struct tcp_md5sig_key *key;
591         const __u8 *hash_location = NULL;
592         unsigned char newhash[16];
593         int genhash;
594         struct sock *sk1 = NULL;
595 #endif
596         struct net *net;
597 
598         /* Never send a reset in response to a reset. */
599         if (th->rst)
600                 return;
601 
602         /* If sk is not NULL, it means we did a successful lookup and the
603          * incoming route had to be correct. prequeue might have dropped our dst.
604          */
605         if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606                 return;
607 
608         /* Swap the send and the receive. */
609         memset(&rep, 0, sizeof(rep));
610         rep.th.dest   = th->source;
611         rep.th.source = th->dest;
612         rep.th.doff   = sizeof(struct tcphdr) / 4;
613         rep.th.rst    = 1;
614 
615         if (th->ack) {
616                 rep.th.seq = th->ack_seq;
617         } else {
618                 rep.th.ack = 1;
619                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620                                        skb->len - (th->doff << 2));
621         }
622 
623         memset(&arg, 0, sizeof(arg));
624         arg.iov[0].iov_base = (unsigned char *)&rep;
625         arg.iov[0].iov_len  = sizeof(rep.th);
626 
627         net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629         hash_location = tcp_parse_md5sig_option(th);
630         if (!sk && hash_location) {
631                 /*
632                  * Active side is lost. Try to find the listening socket through
633                  * the source port, then find the md5 key through that socket.
634                  * We do not lose security here:
635                  * the incoming packet is checked against the md5 hash of the
636                  * found key, and no RST is generated if the hash doesn't match.
637                  */
638                 sk1 = __inet_lookup_listener(net,
639                                              &tcp_hashinfo, ip_hdr(skb)->saddr,
640                                              th->source, ip_hdr(skb)->daddr,
641                                              ntohs(th->source), inet_iif(skb));
642                 /* don't send an rst if we can't find the key */
643                 if (!sk1)
644                         return;
645                 rcu_read_lock();
646                 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647                                         &ip_hdr(skb)->saddr, AF_INET);
648                 if (!key)
649                         goto release_sk1;
650 
651                 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
653                         goto release_sk1;
654         } else {
655                 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656                                              &ip_hdr(skb)->saddr,
657                                              AF_INET) : NULL;
658         }
659 
660         if (key) {
661                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662                                    (TCPOPT_NOP << 16) |
663                                    (TCPOPT_MD5SIG << 8) |
664                                    TCPOLEN_MD5SIG);
665                 /* Update length and the length the header thinks exists */
666                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667                 rep.th.doff = arg.iov[0].iov_len / 4;
668 
669                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670                                      key, ip_hdr(skb)->saddr,
671                                      ip_hdr(skb)->daddr, &rep.th);
672         }
673 #endif
674         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675                                       ip_hdr(skb)->saddr, /* XXX */
676                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
677         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678         arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679         /* When the socket is gone, all binding information is lost and
680          * routing might fail in this case. No choice here: if we force the
681          * input interface, we will misroute in the case of an asymmetric route.
682          */
683         if (sk)
684                 arg.bound_dev_if = sk->sk_bound_dev_if;
685 
686         arg.tos = ip_hdr(skb)->tos;
687         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
689                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690                               &arg, arg.iov[0].iov_len);
691 
692         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693         TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694 
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697         if (sk1) {
698                 rcu_read_unlock();
699                 sock_put(sk1);
700         }
701 #endif
702 }
703 
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside of socket context, is certainly ugly. What can I do?
706  */
707 
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709                             u32 win, u32 tsval, u32 tsecr, int oif,
710                             struct tcp_md5sig_key *key,
711                             int reply_flags, u8 tos)
712 {
713         const struct tcphdr *th = tcp_hdr(skb);
714         struct {
715                 struct tcphdr th;
716                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720                         ];
721         } rep;
722         struct ip_reply_arg arg;
723         struct net *net = dev_net(skb_dst(skb)->dev);
724 
725         memset(&rep.th, 0, sizeof(struct tcphdr));
726         memset(&arg, 0, sizeof(arg));
727 
728         arg.iov[0].iov_base = (unsigned char *)&rep;
729         arg.iov[0].iov_len  = sizeof(rep.th);
730         if (tsecr) {
731                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732                                    (TCPOPT_TIMESTAMP << 8) |
733                                    TCPOLEN_TIMESTAMP);
734                 rep.opt[1] = htonl(tsval);
735                 rep.opt[2] = htonl(tsecr);
736                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737         }
738 
739         /* Swap the send and the receive. */
740         rep.th.dest    = th->source;
741         rep.th.source  = th->dest;
742         rep.th.doff    = arg.iov[0].iov_len / 4;
743         rep.th.seq     = htonl(seq);
744         rep.th.ack_seq = htonl(ack);
745         rep.th.ack     = 1;
746         rep.th.window  = htons(win);
747 
748 #ifdef CONFIG_TCP_MD5SIG
749         if (key) {
750                 int offset = (tsecr) ? 3 : 0;
751 
752                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753                                           (TCPOPT_NOP << 16) |
754                                           (TCPOPT_MD5SIG << 8) |
755                                           TCPOLEN_MD5SIG);
756                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757                 rep.th.doff = arg.iov[0].iov_len/4;
758 
759                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760                                     key, ip_hdr(skb)->saddr,
761                                     ip_hdr(skb)->daddr, &rep.th);
762         }
763 #endif
764         arg.flags = reply_flags;
765         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766                                       ip_hdr(skb)->saddr, /* XXX */
767                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
768         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769         if (oif)
770                 arg.bound_dev_if = oif;
771         arg.tos = tos;
772         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
774                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775                               &arg, arg.iov[0].iov_len);
776 
777         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779 
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782         struct inet_timewait_sock *tw = inet_twsk(sk);
783         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784 
785         tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787                         tcp_time_stamp + tcptw->tw_ts_offset,
788                         tcptw->tw_ts_recent,
789                         tw->tw_bound_dev_if,
790                         tcp_twsk_md5_key(tcptw),
791                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792                         tw->tw_tos
793                         );
794 
795         inet_twsk_put(tw);
796 }
797 
798 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
799                                   struct request_sock *req)
800 {
801         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803          */
804         tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806                         tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
807                         tcp_time_stamp,
808                         req->ts_recent,
809                         0,
810                         tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811                                           AF_INET),
812                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813                         ip_hdr(skb)->tos);
814 }
815 
816 /*
817  *      Send a SYN-ACK after having received a SYN.
818  *      This still operates on a request_sock only, not on a big
819  *      socket.
820  */
821 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
822                               struct flowi *fl,
823                               struct request_sock *req,
824                               u16 queue_mapping,
825                               struct tcp_fastopen_cookie *foc)
826 {
827         const struct inet_request_sock *ireq = inet_rsk(req);
828         struct flowi4 fl4;
829         int err = -1;
830         struct sk_buff *skb;
831 
832         /* First, grab a route. */
833         if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834                 return -1;
835 
836         skb = tcp_make_synack(sk, dst, req, foc);
837 
838         if (skb) {
839                 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840 
841                 skb_set_queue_mapping(skb, queue_mapping);
842                 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
843                                             ireq->ir_rmt_addr,
844                                             ireq->opt);
845                 err = net_xmit_eval(err);
846         }
847 
848         return err;
849 }
850 
851 /*
852  *      IPv4 request_sock destructor.
853  */
854 static void tcp_v4_reqsk_destructor(struct request_sock *req)
855 {
856         kfree(inet_rsk(req)->opt);
857 }
858 
859 
860 #ifdef CONFIG_TCP_MD5SIG
861 /*
862  * RFC2385 MD5 checksumming requires a mapping of
863  * IP address->MD5 Key.
864  * We need to maintain these in the sk structure.
865  */
866 
867 /* Find the Key structure for an address.  */
868 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
869                                          const union tcp_md5_addr *addr,
870                                          int family)
871 {
872         const struct tcp_sock *tp = tcp_sk(sk);
873         struct tcp_md5sig_key *key;
874         unsigned int size = sizeof(struct in_addr);
875         const struct tcp_md5sig_info *md5sig;
876 
877         /* caller either holds rcu_read_lock() or socket lock */
878         md5sig = rcu_dereference_check(tp->md5sig_info,
879                                        sock_owned_by_user(sk) ||
880                                        lockdep_is_held(&sk->sk_lock.slock));
881         if (!md5sig)
882                 return NULL;
883 #if IS_ENABLED(CONFIG_IPV6)
884         if (family == AF_INET6)
885                 size = sizeof(struct in6_addr);
886 #endif
887         hlist_for_each_entry_rcu(key, &md5sig->head, node) {
888                 if (key->family != family)
889                         continue;
890                 if (!memcmp(&key->addr, addr, size))
891                         return key;
892         }
893         return NULL;
894 }
895 EXPORT_SYMBOL(tcp_md5_do_lookup);
896 
897 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
898                                          const struct sock *addr_sk)
899 {
900         const union tcp_md5_addr *addr;
901 
902         addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
903         return tcp_md5_do_lookup(sk, addr, AF_INET);
904 }
905 EXPORT_SYMBOL(tcp_v4_md5_lookup);
906 
907 /* This can be called on a newly created socket, from other files */
908 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
909                    int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
910 {
911         /* Add Key to the list */
912         struct tcp_md5sig_key *key;
913         struct tcp_sock *tp = tcp_sk(sk);
914         struct tcp_md5sig_info *md5sig;
915 
916         key = tcp_md5_do_lookup(sk, addr, family);
917         if (key) {
918                 /* Pre-existing entry - just update that one. */
919                 memcpy(key->key, newkey, newkeylen);
920                 key->keylen = newkeylen;
921                 return 0;
922         }
923 
924         md5sig = rcu_dereference_protected(tp->md5sig_info,
925                                            sock_owned_by_user(sk) ||
926                                            lockdep_is_held(&sk->sk_lock.slock));
927         if (!md5sig) {
928                 md5sig = kmalloc(sizeof(*md5sig), gfp);
929                 if (!md5sig)
930                         return -ENOMEM;
931 
932                 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
933                 INIT_HLIST_HEAD(&md5sig->head);
934                 rcu_assign_pointer(tp->md5sig_info, md5sig);
935         }
936 
937         key = sock_kmalloc(sk, sizeof(*key), gfp);
938         if (!key)
939                 return -ENOMEM;
940         if (!tcp_alloc_md5sig_pool()) {
941                 sock_kfree_s(sk, key, sizeof(*key));
942                 return -ENOMEM;
943         }
944 
945         memcpy(key->key, newkey, newkeylen);
946         key->keylen = newkeylen;
947         key->family = family;
948         memcpy(&key->addr, addr,
949                (family == AF_INET6) ? sizeof(struct in6_addr) :
950                                       sizeof(struct in_addr));
951         hlist_add_head_rcu(&key->node, &md5sig->head);
952         return 0;
953 }
954 EXPORT_SYMBOL(tcp_md5_do_add);
955 
956 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
957 {
958         struct tcp_md5sig_key *key;
959 
960         key = tcp_md5_do_lookup(sk, addr, family);
961         if (!key)
962                 return -ENOENT;
963         hlist_del_rcu(&key->node);
964         atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
965         kfree_rcu(key, rcu);
966         return 0;
967 }
968 EXPORT_SYMBOL(tcp_md5_do_del);
969 
970 static void tcp_clear_md5_list(struct sock *sk)
971 {
972         struct tcp_sock *tp = tcp_sk(sk);
973         struct tcp_md5sig_key *key;
974         struct hlist_node *n;
975         struct tcp_md5sig_info *md5sig;
976 
977         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
978 
979         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
980                 hlist_del_rcu(&key->node);
981                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
982                 kfree_rcu(key, rcu);
983         }
984 }
985 
986 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
987                                  int optlen)
988 {
989         struct tcp_md5sig cmd;
990         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
991 
992         if (optlen < sizeof(cmd))
993                 return -EINVAL;
994 
995         if (copy_from_user(&cmd, optval, sizeof(cmd)))
996                 return -EFAULT;
997 
998         if (sin->sin_family != AF_INET)
999                 return -EINVAL;
1000 
1001         if (!cmd.tcpm_keylen)
1002                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1003                                       AF_INET);
1004 
1005         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1006                 return -EINVAL;
1007 
1008         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1009                               AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1010                               GFP_KERNEL);
1011 }
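
tcp_v4_parse_md5_keys() is the receive side of the TCP_MD5SIG socket option
(RFC 2385): user space passes a struct tcp_md5sig naming the peer address and the
key, which then lands in tcp_md5_do_add() above. A hedged sketch of installing a key
on a socket, assuming the uapi layout from <linux/tcp.h> (the helper name is made up
for illustration):

    #include <linux/tcp.h>          /* TCP_MD5SIG, struct tcp_md5sig */
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Install an RFC 2385 key for one IPv4 peer on "fd". Mirrors what
     * tcp_v4_parse_md5_keys() checks: AF_INET address and a non-zero key
     * length no larger than TCP_MD5SIG_MAXKEYLEN. */
    static int install_md5_key(int fd, const struct sockaddr_in *peer,
                               const void *key, unsigned int keylen)
    {
            struct tcp_md5sig md5;

            if (keylen == 0 || keylen > TCP_MD5SIG_MAXKEYLEN)
                    return -1;

            memset(&md5, 0, sizeof(md5));
            memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
            md5.tcpm_keylen = keylen;
            memcpy(md5.tcpm_key, key, keylen);

            return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
    }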
1012 
1013 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1014                                         __be32 daddr, __be32 saddr, int nbytes)
1015 {
1016         struct tcp4_pseudohdr *bp;
1017         struct scatterlist sg;
1018 
1019         bp = &hp->md5_blk.ip4;
1020 
1021         /*
1022          * 1. the TCP pseudo-header (in the order: source IP address,
1023          * destination IP address, zero-padded protocol number, and
1024          * segment length)
1025          */
1026         bp->saddr = saddr;
1027         bp->daddr = daddr;
1028         bp->pad = 0;
1029         bp->protocol = IPPROTO_TCP;
1030         bp->len = cpu_to_be16(nbytes);
1031 
1032         sg_init_one(&sg, bp, sizeof(*bp));
1033         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1034 }
1035 
1036 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1037                                __be32 daddr, __be32 saddr, const struct tcphdr *th)
1038 {
1039         struct tcp_md5sig_pool *hp;
1040         struct hash_desc *desc;
1041 
1042         hp = tcp_get_md5sig_pool();
1043         if (!hp)
1044                 goto clear_hash_noput;
1045         desc = &hp->md5_desc;
1046 
1047         if (crypto_hash_init(desc))
1048                 goto clear_hash;
1049         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1050                 goto clear_hash;
1051         if (tcp_md5_hash_header(hp, th))
1052                 goto clear_hash;
1053         if (tcp_md5_hash_key(hp, key))
1054                 goto clear_hash;
1055         if (crypto_hash_final(desc, md5_hash))
1056                 goto clear_hash;
1057 
1058         tcp_put_md5sig_pool();
1059         return 0;
1060 
1061 clear_hash:
1062         tcp_put_md5sig_pool();
1063 clear_hash_noput:
1064         memset(md5_hash, 0, 16);
1065         return 1;
1066 }
1067 
1068 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1069                         const struct sock *sk,
1070                         const struct sk_buff *skb)
1071 {
1072         struct tcp_md5sig_pool *hp;
1073         struct hash_desc *desc;
1074         const struct tcphdr *th = tcp_hdr(skb);
1075         __be32 saddr, daddr;
1076 
1077         if (sk) { /* valid for establish/request sockets */
1078                 saddr = sk->sk_rcv_saddr;
1079                 daddr = sk->sk_daddr;
1080         } else {
1081                 const struct iphdr *iph = ip_hdr(skb);
1082                 saddr = iph->saddr;
1083                 daddr = iph->daddr;
1084         }
1085 
1086         hp = tcp_get_md5sig_pool();
1087         if (!hp)
1088                 goto clear_hash_noput;
1089         desc = &hp->md5_desc;
1090 
1091         if (crypto_hash_init(desc))
1092                 goto clear_hash;
1093 
1094         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1095                 goto clear_hash;
1096         if (tcp_md5_hash_header(hp, th))
1097                 goto clear_hash;
1098         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1099                 goto clear_hash;
1100         if (tcp_md5_hash_key(hp, key))
1101                 goto clear_hash;
1102         if (crypto_hash_final(desc, md5_hash))
1103                 goto clear_hash;
1104 
1105         tcp_put_md5sig_pool();
1106         return 0;
1107 
1108 clear_hash:
1109         tcp_put_md5sig_pool();
1110 clear_hash_noput:
1111         memset(md5_hash, 0, 16);
1112         return 1;
1113 }
1114 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1115 
1116 /* Called with rcu_read_lock() */
1117 static bool tcp_v4_inbound_md5_hash(struct sock *sk,
1118                                     const struct sk_buff *skb)
1119 {
1120         /*
1121          * This gets called for each TCP segment that arrives
1122          * so we want to be efficient.
1123          * We have 3 drop cases:
1124          * o No MD5 hash and one expected.
1125          * o MD5 hash and we're not expecting one.
1126          * o MD5 hash and it's wrong.
1127          */
1128         const __u8 *hash_location = NULL;
1129         struct tcp_md5sig_key *hash_expected;
1130         const struct iphdr *iph = ip_hdr(skb);
1131         const struct tcphdr *th = tcp_hdr(skb);
1132         int genhash;
1133         unsigned char newhash[16];
1134 
1135         hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1136                                           AF_INET);
1137         hash_location = tcp_parse_md5sig_option(th);
1138 
1139         /* We've parsed the options - do we have a hash? */
1140         if (!hash_expected && !hash_location)
1141                 return false;
1142 
1143         if (hash_expected && !hash_location) {
1144                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1145                 return true;
1146         }
1147 
1148         if (!hash_expected && hash_location) {
1149                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1150                 return true;
1151         }
1152 
1153         /* Okay, so this is hash_expected and hash_location -
1154          * so we need to calculate the checksum.
1155          */
1156         genhash = tcp_v4_md5_hash_skb(newhash,
1157                                       hash_expected,
1158                                       NULL, skb);
1159 
1160         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1161                 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1162                                      &iph->saddr, ntohs(th->source),
1163                                      &iph->daddr, ntohs(th->dest),
1164                                      genhash ? " tcp_v4_calc_md5_hash failed"
1165                                      : "");
1166                 return true;
1167         }
1168         return false;
1169 }
1170 #endif
1171 
1172 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
1173                             struct sk_buff *skb)
1174 {
1175         struct inet_request_sock *ireq = inet_rsk(req);
1176 
1177         sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1178         sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1179         ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1180         ireq->opt = tcp_v4_save_options(skb);
1181 }
1182 
1183 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1184                                           const struct request_sock *req,
1185                                           bool *strict)
1186 {
1187         struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1188 
1189         if (strict) {
1190                 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1191                         *strict = true;
1192                 else
1193                         *strict = false;
1194         }
1195 
1196         return dst;
1197 }
1198 
1199 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1200         .family         =       PF_INET,
1201         .obj_size       =       sizeof(struct tcp_request_sock),
1202         .rtx_syn_ack    =       tcp_rtx_synack,
1203         .send_ack       =       tcp_v4_reqsk_send_ack,
1204         .destructor     =       tcp_v4_reqsk_destructor,
1205         .send_reset     =       tcp_v4_send_reset,
1206         .syn_ack_timeout =      tcp_syn_ack_timeout,
1207 };
1208 
1209 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1210         .mss_clamp      =       TCP_MSS_DEFAULT,
1211 #ifdef CONFIG_TCP_MD5SIG
1212         .req_md5_lookup =       tcp_v4_md5_lookup,
1213         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1214 #endif
1215         .init_req       =       tcp_v4_init_req,
1216 #ifdef CONFIG_SYN_COOKIES
1217         .cookie_init_seq =      cookie_v4_init_sequence,
1218 #endif
1219         .route_req      =       tcp_v4_route_req,
1220         .init_seq       =       tcp_v4_init_sequence,
1221         .send_synack    =       tcp_v4_send_synack,
1222         .queue_hash_add =       inet_csk_reqsk_queue_hash_add,
1223 };
1224 
1225 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1226 {
1227         /* Never answer SYNs sent to broadcast or multicast addresses */
1228         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1229                 goto drop;
1230 
1231         return tcp_conn_request(&tcp_request_sock_ops,
1232                                 &tcp_request_sock_ipv4_ops, sk, skb);
1233 
1234 drop:
1235         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1236         return 0;
1237 }
1238 EXPORT_SYMBOL(tcp_v4_conn_request);
1239 
1240 
1241 /*
1242  * The three way handshake has completed - we got a valid synack -
1243  * now create the new socket.
1244  */
1245 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1246                                   struct request_sock *req,
1247                                   struct dst_entry *dst)
1248 {
1249         struct inet_request_sock *ireq;
1250         struct inet_sock *newinet;
1251         struct tcp_sock *newtp;
1252         struct sock *newsk;
1253 #ifdef CONFIG_TCP_MD5SIG
1254         struct tcp_md5sig_key *key;
1255 #endif
1256         struct ip_options_rcu *inet_opt;
1257 
1258         if (sk_acceptq_is_full(sk))
1259                 goto exit_overflow;
1260 
1261         newsk = tcp_create_openreq_child(sk, req, skb);
1262         if (!newsk)
1263                 goto exit_nonewsk;
1264 
1265         newsk->sk_gso_type = SKB_GSO_TCPV4;
1266         inet_sk_rx_dst_set(newsk, skb);
1267 
1268         newtp                 = tcp_sk(newsk);
1269         newinet               = inet_sk(newsk);
1270         ireq                  = inet_rsk(req);
1271         sk_daddr_set(newsk, ireq->ir_rmt_addr);
1272         sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1273         newinet->inet_saddr           = ireq->ir_loc_addr;
1274         inet_opt              = ireq->opt;
1275         rcu_assign_pointer(newinet->inet_opt, inet_opt);
1276         ireq->opt             = NULL;
1277         newinet->mc_index     = inet_iif(skb);
1278         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1279         newinet->rcv_tos      = ip_hdr(skb)->tos;
1280         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1281         sk_set_txhash(newsk);
1282         if (inet_opt)
1283                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1284         newinet->inet_id = newtp->write_seq ^ jiffies;
1285 
1286         if (!dst) {
1287                 dst = inet_csk_route_child_sock(sk, newsk, req);
1288                 if (!dst)
1289                         goto put_and_exit;
1290         } else {
1291                 /* syncookie case : see end of cookie_v4_check() */
1292         }
1293         sk_setup_caps(newsk, dst);
1294 
1295         tcp_ca_openreq_child(newsk, dst);
1296 
1297         tcp_sync_mss(newsk, dst_mtu(dst));
1298         newtp->advmss = dst_metric_advmss(dst);
1299         if (tcp_sk(sk)->rx_opt.user_mss &&
1300             tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1301                 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1302 
1303         tcp_initialize_rcv_mss(newsk);
1304 
1305 #ifdef CONFIG_TCP_MD5SIG
1306         /* Copy over the MD5 key from the original socket */
1307         key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1308                                 AF_INET);
1309         if (key) {
1310                 /*
1311                  * We're using one, so create a matching key
1312                  * on the newsk structure. If we fail to get
1313                  * memory, then we end up not copying the key
1314                  * across. Shucks.
1315                  */
1316                 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1317                                AF_INET, key->key, key->keylen, GFP_ATOMIC);
1318                 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1319         }
1320 #endif
1321 
1322         if (__inet_inherit_port(sk, newsk) < 0)
1323                 goto put_and_exit;
1324         __inet_hash_nolisten(newsk, NULL);
1325 
1326         return newsk;
1327 
1328 exit_overflow:
1329         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1330 exit_nonewsk:
1331         dst_release(dst);
1332 exit:
1333         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1334         return NULL;
1335 put_and_exit:
1336         inet_csk_prepare_forced_close(newsk);
1337         tcp_done(newsk);
1338         goto exit;
1339 }
1340 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1341 
1342 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1343 {
1344         const struct tcphdr *th = tcp_hdr(skb);
1345         const struct iphdr *iph = ip_hdr(skb);
1346         struct request_sock *req;
1347         struct sock *nsk;
1348 
1349         req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1350         if (req) {
1351                 nsk = tcp_check_req(sk, skb, req, false);
1352                 if (!nsk || nsk == sk)
1353                         reqsk_put(req);
1354                 return nsk;
1355         }
1356 
1357         nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1358                         th->source, iph->daddr, th->dest, inet_iif(skb));
1359 
1360         if (nsk) {
1361                 if (nsk->sk_state != TCP_TIME_WAIT) {
1362                         bh_lock_sock(nsk);
1363                         return nsk;
1364                 }
1365                 inet_twsk_put(inet_twsk(nsk));
1366                 return NULL;
1367         }
1368 
1369 #ifdef CONFIG_SYN_COOKIES
1370         if (!th->syn)
1371                 sk = cookie_v4_check(sk, skb);
1372 #endif
1373         return sk;
1374 }
1375 
1376 /* The socket must have its spinlock held when we get
1377  * here.
1378  *
1379  * We have a potential double-lock case here, so even when
1380  * doing backlog processing we use the BH locking scheme.
1381  * This is because we cannot sleep with the original spinlock
1382  * held.
1383  */
1384 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1385 {
1386         struct sock *rsk;
1387 
1388         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1389                 struct dst_entry *dst = sk->sk_rx_dst;
1390 
1391                 sock_rps_save_rxhash(sk, skb);
1392                 sk_mark_napi_id(sk, skb);
1393                 if (dst) {
1394                         if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1395                             !dst->ops->check(dst, 0)) {
1396                                 dst_release(dst);
1397                                 sk->sk_rx_dst = NULL;
1398                         }
1399                 }
1400                 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1401                 return 0;
1402         }
1403 
1404         if (tcp_checksum_complete(skb))
1405                 goto csum_err;
1406 
1407         if (sk->sk_state == TCP_LISTEN) {
1408                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1409                 if (!nsk)
1410                         goto discard;
1411 
1412                 if (nsk != sk) {
1413                         sock_rps_save_rxhash(nsk, skb);
1414                         sk_mark_napi_id(sk, skb);
1415                         if (tcp_child_process(sk, nsk, skb)) {
1416                                 rsk = nsk;
1417                                 goto reset;
1418                         }
1419                         return 0;
1420                 }
1421         } else
1422                 sock_rps_save_rxhash(sk, skb);
1423 
1424         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1425                 rsk = sk;
1426                 goto reset;
1427         }
1428         return 0;
1429 
1430 reset:
1431         tcp_v4_send_reset(rsk, skb);
1432 discard:
1433         kfree_skb(skb);
1434         /* Be careful here. If this function gets more complicated and
1435          * gcc suffers from register pressure on the x86, sk (in %ebx)
1436          * might be destroyed here. This current version compiles correctly,
1437          * but you have been warned.
1438          */
1439         return 0;
1440 
1441 csum_err:
1442         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1443         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1444         goto discard;
1445 }
1446 EXPORT_SYMBOL(tcp_v4_do_rcv);
1447 
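     /* Early demux, called from the IP input path before the routing decision:
      * if the segment matches an established socket, attach that socket and,
      * when still valid for the incoming interface, its cached input route to
      * the skb so the per-packet route lookup can be skipped.
      */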
1448 void tcp_v4_early_demux(struct sk_buff *skb)
1449 {
1450         const struct iphdr *iph;
1451         const struct tcphdr *th;
1452         struct sock *sk;
1453 
1454         if (skb->pkt_type != PACKET_HOST)
1455                 return;
1456 
1457         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1458                 return;
1459 
1460         iph = ip_hdr(skb);
1461         th = tcp_hdr(skb);
1462 
1463         if (th->doff < sizeof(struct tcphdr) / 4)
1464                 return;
1465 
1466         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1467                                        iph->saddr, th->source,
1468                                        iph->daddr, ntohs(th->dest),
1469                                        skb->skb_iif);
1470         if (sk) {
1471                 skb->sk = sk;
1472                 skb->destructor = sock_edemux;
1473                 if (sk_fullsock(sk)) {
1474                         struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1475 
1476                         if (dst)
1477                                 dst = dst_check(dst, 0);
1478                         if (dst &&
1479                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1480                                 skb_dst_set_noref(skb, dst);
1481                 }
1482         }
1483 }
1484 
1485 /* The packet is added to the VJ-style prequeue for processing in process
1486  * context, if a reader task is waiting. Apparently, this exciting
1487  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1488  * failed somewhere. Latency? Burstiness? Well, at least now we will
1489  * see why it failed. 8)8)                               --ANK
1490  *
1491  */
1492 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1493 {
1494         struct tcp_sock *tp = tcp_sk(sk);
1495 
1496         if (sysctl_tcp_low_latency || !tp->ucopy.task)
1497                 return false;
1498 
1499         if (skb->len <= tcp_hdrlen(skb) &&
1500             skb_queue_len(&tp->ucopy.prequeue) == 0)
1501                 return false;
1502 
1503         /* Before escaping the RCU protected region, we need to take care of the
1504          * skb dst. The prequeue is only enabled for established sockets.
1505          * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1506          * Instead of doing a full sk_rx_dst validity check here, let's perform
1507          * an optimistic check.
1508          */
1509         if (likely(sk->sk_rx_dst))
1510                 skb_dst_drop(skb);
1511         else
1512                 skb_dst_force_safe(skb);
1513 
1514         __skb_queue_tail(&tp->ucopy.prequeue, skb);
1515         tp->ucopy.memory += skb->truesize;
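             /* If the prequeue has outgrown the socket receive buffer, drain it
              * immediately through the backlog receive handler instead of waiting
              * for the reader; each drained skb is accounted in
              * LINUX_MIB_TCPPREQUEUEDROPPED.
              */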
1516         if (tp->ucopy.memory > sk->sk_rcvbuf) {
1517                 struct sk_buff *skb1;
1518 
1519                 BUG_ON(sock_owned_by_user(sk));
1520 
1521                 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1522                         sk_backlog_rcv(sk, skb1);
1523                         NET_INC_STATS_BH(sock_net(sk),
1524                                          LINUX_MIB_TCPPREQUEUEDROPPED);
1525                 }
1526 
1527                 tp->ucopy.memory = 0;
1528         } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1529                 wake_up_interruptible_sync_poll(sk_sleep(sk),
1530                                            POLLIN | POLLRDNORM | POLLRDBAND);
1531                 if (!inet_csk_ack_scheduled(sk))
1532                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1533                                                   (3 * tcp_rto_min(sk)) / 4,
1534                                                   TCP_RTO_MAX);
1535         }
1536         return true;
1537 }
1538 EXPORT_SYMBOL(tcp_prequeue);
1539 
1540 /*
1541  *      From tcp_input.c
1542  */
1543 
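     /* Main IPv4 receive entry point, called from the IP layer for every TCP
      * segment.  It validates the header and checksum, fills in TCP_SKB_CB(),
      * looks up the owning socket, applies per-socket checks (TTL, XFRM
      * policy, MD5, socket filter) and then either processes the segment
      * directly, hands it to the prequeue, or appends it to the socket
      * backlog when the socket is owned by the user.
      */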
1544 int tcp_v4_rcv(struct sk_buff *skb)
1545 {
1546         const struct iphdr *iph;
1547         const struct tcphdr *th;
1548         struct sock *sk;
1549         int ret;
1550         struct net *net = dev_net(skb->dev);
1551 
1552         if (skb->pkt_type != PACKET_HOST)
1553                 goto discard_it;
1554 
1555         /* Count it even if it's bad */
1556         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1557 
1558         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1559                 goto discard_it;
1560 
1561         th = tcp_hdr(skb);
1562 
1563         if (th->doff < sizeof(struct tcphdr) / 4)
1564                 goto bad_packet;
1565         if (!pskb_may_pull(skb, th->doff * 4))
1566                 goto discard_it;
1567 
1568         /* An explanation is required here, I think.
1569          * Packet length and doff are validated by header prediction,
1570          * provided the case of th->doff == 0 is eliminated.
1571          * So, we defer the checks. */
1572 
1573         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1574                 goto csum_error;
1575 
1576         th = tcp_hdr(skb);
1577         iph = ip_hdr(skb);
1578         /* This is tricky: we move the IPCB to its correct location inside TCP_SKB_CB();
1579          * barrier() makes sure the compiler won't play fool^Waliasing games.
1580          */
1581         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1582                 sizeof(struct inet_skb_parm));
1583         barrier();
1584 
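             /* Fill the TCP control block from the headers.  end_seq accounts for
              * the SYN and FIN flags, since each of them consumes one unit of
              * sequence space in addition to the payload bytes.
              */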
1585         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1586         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1587                                     skb->len - th->doff * 4);
1588         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1589         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1590         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1591         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1592         TCP_SKB_CB(skb)->sacked  = 0;
1593 
1594         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1595         if (!sk)
1596                 goto no_tcp_socket;
1597 
1598 process:
1599         if (sk->sk_state == TCP_TIME_WAIT)
1600                 goto do_time_wait;
1601 
1602         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1603                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1604                 goto discard_and_relse;
1605         }
1606 
1607         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1608                 goto discard_and_relse;
1609 
1610 #ifdef CONFIG_TCP_MD5SIG
1611         /*
1612          * We really want to reject the packet as early as possible
1613          * if:
1614          *  o We're expecting an MD5'd packet and there is no MD5 TCP option
1615          *  o There is an MD5 option and we're not expecting one
1616          */
1617         if (tcp_v4_inbound_md5_hash(sk, skb))
1618                 goto discard_and_relse;
1619 #endif
1620 
1621         nf_reset(skb);
1622 
1623         if (sk_filter(sk, skb))
1624                 goto discard_and_relse;
1625 
1626         sk_incoming_cpu_update(sk);
1627         skb->dev = NULL;
1628 
1629         bh_lock_sock_nested(sk);
1630         tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1631         ret = 0;
1632         if (!sock_owned_by_user(sk)) {
1633                 if (!tcp_prequeue(sk, skb))
1634                         ret = tcp_v4_do_rcv(sk, skb);
1635         } else if (unlikely(sk_add_backlog(sk, skb,
1636                                            sk->sk_rcvbuf + sk->sk_sndbuf))) {
1637                 bh_unlock_sock(sk);
1638                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1639                 goto discard_and_relse;
1640         }
1641         bh_unlock_sock(sk);
1642 
1643         sock_put(sk);
1644 
1645         return ret;
1646 
1647 no_tcp_socket:
1648         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1649                 goto discard_it;
1650 
1651         if (tcp_checksum_complete(skb)) {
1652 csum_error:
1653                 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1654 bad_packet:
1655                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1656         } else {
1657                 tcp_v4_send_reset(NULL, skb);
1658         }
1659 
1660 discard_it:
1661         /* Discard frame. */
1662         kfree_skb(skb);
1663         return 0;
1664 
1665 discard_and_relse:
1666         sock_put(sk);
1667         goto discard_it;
1668 
1669 do_time_wait:
1670         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1671                 inet_twsk_put(inet_twsk(sk));
1672                 goto discard_it;
1673         }
1674 
1675         if (tcp_checksum_complete(skb)) {
1676                 inet_twsk_put(inet_twsk(sk));
1677                 goto csum_error;
1678         }
1679         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1680         case TCP_TW_SYN: {
1681                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1682                                                         &tcp_hashinfo,
1683                                                         iph->saddr, th->source,
1684                                                         iph->daddr, th->dest,
1685                                                         inet_iif(skb));
1686                 if (sk2) {
1687                         inet_twsk_deschedule_put(inet_twsk(sk));
1688                         sk = sk2;
1689                         goto process;
1690                 }
1691                 /* Fall through to ACK */
1692         }
1693         case TCP_TW_ACK:
1694                 tcp_v4_timewait_ack(sk, skb);
1695                 break;
1696         case TCP_TW_RST:
1697                 goto no_tcp_socket;
1698         case TCP_TW_SUCCESS:;
1699         }
1700         goto discard_it;
1701 }
1702 
1703 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1704         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1705         .twsk_unique    = tcp_twsk_unique,
1706         .twsk_destructor = tcp_twsk_destructor,
1707 };
1708 
1709 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1710 {
1711         struct dst_entry *dst = skb_dst(skb);
1712 
1713         if (dst && dst_hold_safe(dst)) {
1714                 sk->sk_rx_dst = dst;
1715                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1716         }
1717 }
1718 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1719 
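     /* Address-family specific operations hooked into the connection socket for
      * IPv4 TCP; tcp_v4_init_sock() below installs this vector in icsk_af_ops.
      */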
1720 const struct inet_connection_sock_af_ops ipv4_specific = {
1721         .queue_xmit        = ip_queue_xmit,
1722         .send_check        = tcp_v4_send_check,
1723         .rebuild_header    = inet_sk_rebuild_header,
1724         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1725         .conn_request      = tcp_v4_conn_request,
1726         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1727         .net_header_len    = sizeof(struct iphdr),
1728         .setsockopt        = ip_setsockopt,
1729         .getsockopt        = ip_getsockopt,
1730         .addr2sockaddr     = inet_csk_addr2sockaddr,
1731         .sockaddr_len      = sizeof(struct sockaddr_in),
1732         .bind_conflict     = inet_csk_bind_conflict,
1733 #ifdef CONFIG_COMPAT
1734         .compat_setsockopt = compat_ip_setsockopt,
1735         .compat_getsockopt = compat_ip_getsockopt,
1736 #endif
1737         .mtu_reduced       = tcp_v4_mtu_reduced,
1738 };
1739 EXPORT_SYMBOL(ipv4_specific);
1740 
1741 #ifdef CONFIG_TCP_MD5SIG
1742 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1743         .md5_lookup             = tcp_v4_md5_lookup,
1744         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1745         .md5_parse              = tcp_v4_parse_md5_keys,
1746 };
1747 #endif
1748 
1749 /* NOTE: A lot of things are set to zero explicitly by the call to
1750  *       sk_alloc(), so they need not be done here.
1751  */
1752 static int tcp_v4_init_sock(struct sock *sk)
1753 {
1754         struct inet_connection_sock *icsk = inet_csk(sk);
1755 
1756         tcp_init_sock(sk);
1757 
1758         icsk->icsk_af_ops = &ipv4_specific;
1759 
1760 #ifdef CONFIG_TCP_MD5SIG
1761         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1762 #endif
1763 
1764         return 0;
1765 }
1766 
1767 void tcp_v4_destroy_sock(struct sock *sk)
1768 {
1769         struct tcp_sock *tp = tcp_sk(sk);
1770 
1771         tcp_clear_xmit_timers(sk);
1772 
1773         tcp_cleanup_congestion_control(sk);
1774 
1775         /* Clean up the write buffer. */
1776         tcp_write_queue_purge(sk);
1777 
1778         /* Cleans up our, hopefully empty, out_of_order_queue. */
1779         __skb_queue_purge(&tp->out_of_order_queue);
1780 
1781 #ifdef CONFIG_TCP_MD5SIG
1782         /* Clean up the MD5 key list, if any */
1783         if (tp->md5sig_info) {
1784                 tcp_clear_md5_list(sk);
1785                 kfree_rcu(tp->md5sig_info, rcu);
1786                 tp->md5sig_info = NULL;
1787         }
1788 #endif
1789 
1790         /* Clean up the prequeue; it should already be empty. */
1791         __skb_queue_purge(&tp->ucopy.prequeue);
1792 
1793         /* Clean up a referenced TCP bind bucket. */
1794         if (inet_csk(sk)->icsk_bind_hash)
1795                 inet_put_port(sk);
1796 
1797         BUG_ON(tp->fastopen_rsk);
1798 
1799         /* If socket is aborted during connect operation */
1800         tcp_free_fastopen_req(tp);
1801         tcp_saved_syn_free(tp);
1802 
1803         sk_sockets_allocated_dec(sk);
1804         sock_release_memcg(sk);
1805 }
1806 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1807 
1808 #ifdef CONFIG_PROC_FS
1809 /* Proc filesystem TCP sock list dumping. */
1810 
1811 /*
1812  * Get the next listener socket following cur.  If cur is NULL, get the
1813  * first socket starting from the bucket given in st->bucket; when
1814  * st->bucket is zero the very first socket in the hash table is returned.
1815  */
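     /* While iterating, the walk may descend into a listener's SYN table
      * (TCP_SEQ_STATE_OPENREQ); the per-listener syn_wait_lock is held for the
      * duration of that inner walk and released before moving on to the next
      * listener socket.
      */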
1816 static void *listening_get_next(struct seq_file *seq, void *cur)
1817 {
1818         struct inet_connection_sock *icsk;
1819         struct hlist_nulls_node *node;
1820         struct sock *sk = cur;
1821         struct inet_listen_hashbucket *ilb;
1822         struct tcp_iter_state *st = seq->private;
1823         struct net *net = seq_file_net(seq);
1824 
1825         if (!sk) {
1826                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1827                 spin_lock_bh(&ilb->lock);
1828                 sk = sk_nulls_head(&ilb->head);
1829                 st->offset = 0;
1830                 goto get_sk;
1831         }
1832         ilb = &tcp_hashinfo.listening_hash[st->bucket];
1833         ++st->num;
1834         ++st->offset;
1835 
1836         if (st->state == TCP_SEQ_STATE_OPENREQ) {
1837                 struct request_sock *req = cur;
1838 
1839                 icsk = inet_csk(st->syn_wait_sk);
1840                 req = req->dl_next;
1841                 while (1) {
1842                         while (req) {
1843                                 if (req->rsk_ops->family == st->family) {
1844                                         cur = req;
1845                                         goto out;
1846                                 }
1847                                 req = req->dl_next;
1848                         }
1849                         if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1850                                 break;
1851 get_req:
1852                         req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1853                 }
1854                 sk        = sk_nulls_next(st->syn_wait_sk);
1855                 st->state = TCP_SEQ_STATE_LISTENING;
1856                 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1857         } else {
1858                 icsk = inet_csk(sk);
1859                 spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1860                 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1861                         goto start_req;
1862                 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1863                 sk = sk_nulls_next(sk);
1864         }
1865 get_sk:
1866         sk_nulls_for_each_from(sk, node) {
1867                 if (!net_eq(sock_net(sk), net))
1868                         continue;
1869                 if (sk->sk_family == st->family) {
1870                         cur = sk;
1871                         goto out;
1872                 }
1873                 icsk = inet_csk(sk);
1874                 spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1875                 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1876 start_req:
1877                         st->uid         = sock_i_uid(sk);
1878                         st->syn_wait_sk = sk;
1879                         st->state       = TCP_SEQ_STATE_OPENREQ;
1880                         st->sbucket     = 0;
1881                         goto get_req;
1882                 }
1883                 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1884         }
1885         spin_unlock_bh(&ilb->lock);
1886         st->offset = 0;
1887         if (++st->bucket < INET_LHTABLE_SIZE) {
1888                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1889                 spin_lock_bh(&ilb->lock);
1890                 sk = sk_nulls_head(&ilb->head);
1891                 goto get_sk;
1892         }
1893         cur = NULL;
1894 out:
1895         return cur;
1896 }
1897 
1898 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1899 {
1900         struct tcp_iter_state *st = seq->private;
1901         void *rc;
1902 
1903         st->bucket = 0;
1904         st->offset = 0;
1905         rc = listening_get_next(seq, NULL);
1906 
1907         while (rc && *pos) {
1908                 rc = listening_get_next(seq, rc);
1909                 --*pos;
1910         }
1911         return rc;
1912 }
1913 
1914 static inline bool empty_bucket(const struct tcp_iter_state *st)
1915 {
1916         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1917 }
1918 
1919 /*
1920  * Get the first established socket starting from the bucket given in st->bucket.
1921  * If st->bucket is zero, the very first socket in the hash is returned.
1922  */
1923 static void *established_get_first(struct seq_file *seq)
1924 {
1925         struct tcp_iter_state *st = seq->private;
1926         struct net *net = seq_file_net(seq);
1927         void *rc = NULL;
1928 
1929         st->offset = 0;
1930         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1931                 struct sock *sk;
1932                 struct hlist_nulls_node *node;
1933                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1934 
1935                 /* Lockless fast path for the common case of empty buckets */
1936                 if (empty_bucket(st))
1937                         continue;
1938 
1939                 spin_lock_bh(lock);
1940                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1941                         if (sk->sk_family != st->family ||
1942                             !net_eq(sock_net(sk), net)) {
1943                                 continue;
1944                         }
1945                         rc = sk;
1946                         goto out;
1947                 }
1948                 spin_unlock_bh(lock);
1949         }
1950 out:
1951         return rc;
1952 }
1953 
1954 static void *established_get_next(struct seq_file *seq, void *cur)
1955 {
1956         struct sock *sk = cur;
1957         struct hlist_nulls_node *node;
1958         struct tcp_iter_state *st = seq->private;
1959         struct net *net = seq_file_net(seq);
1960 
1961         ++st->num;
1962         ++st->offset;
1963 
1964         sk = sk_nulls_next(sk);
1965 
1966         sk_nulls_for_each_from(sk, node) {
1967                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1968                         return sk;
1969         }
1970 
1971         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1972         ++st->bucket;
1973         return established_get_first(seq);
1974 }
1975 
1976 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1977 {
1978         struct tcp_iter_state *st = seq->private;
1979         void *rc;
1980 
1981         st->bucket = 0;
1982         rc = established_get_first(seq);
1983 
1984         while (rc && pos) {
1985                 rc = established_get_next(seq, rc);
1986                 --pos;
1987         }
1988         return rc;
1989 }
1990 
1991 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1992 {
1993         void *rc;
1994         struct tcp_iter_state *st = seq->private;
1995 
1996         st->state = TCP_SEQ_STATE_LISTENING;
1997         rc        = listening_get_idx(seq, &pos);
1998 
1999         if (!rc) {
2000                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2001                 rc        = established_get_idx(seq, pos);
2002         }
2003 
2004         return rc;
2005 }
2006 
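     /* Resume iteration at the bucket/offset saved from the previous read so
      * that consecutive reads of the seq_file do not rescan every bucket from
      * the start; tcp_seq_start() falls back to a full walk when this fails.
      */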
2007 static void *tcp_seek_last_pos(struct seq_file *seq)
2008 {
2009         struct tcp_iter_state *st = seq->private;
2010         int offset = st->offset;
2011         int orig_num = st->num;
2012         void *rc = NULL;
2013 
2014         switch (st->state) {
2015         case TCP_SEQ_STATE_OPENREQ:
2016         case TCP_SEQ_STATE_LISTENING:
2017                 if (st->bucket >= INET_LHTABLE_SIZE)
2018                         break;
2019                 st->state = TCP_SEQ_STATE_LISTENING;
2020                 rc = listening_get_next(seq, NULL);
2021                 while (offset-- && rc)
2022                         rc = listening_get_next(seq, rc);
2023                 if (rc)
2024                         break;
2025                 st->bucket = 0;
2026                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2027                 /* Fallthrough */
2028         case TCP_SEQ_STATE_ESTABLISHED:
2029                 if (st->bucket > tcp_hashinfo.ehash_mask)
2030                         break;
2031                 rc = established_get_first(seq);
2032                 while (offset-- && rc)
2033                         rc = established_get_next(seq, rc);
2034         }
2035 
2036         st->num = orig_num;
2037 
2038         return rc;
2039 }
2040 
2041 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2042 {
2043         struct tcp_iter_state *st = seq->private;
2044         void *rc;
2045 
2046         if (*pos && *pos == st->last_pos) {
2047                 rc = tcp_seek_last_pos(seq);
2048                 if (rc)
2049                         goto out;
2050         }
2051 
2052         st->state = TCP_SEQ_STATE_LISTENING;
2053         st->num = 0;
2054         st->bucket = 0;
2055         st->offset = 0;
2056         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2057 
2058 out:
2059         st->last_pos = *pos;
2060         return rc;
2061 }
2062 
2063 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2064 {
2065         struct tcp_iter_state *st = seq->private;
2066         void *rc = NULL;
2067 
2068         if (v == SEQ_START_TOKEN) {
2069                 rc = tcp_get_idx(seq, 0);
2070                 goto out;
2071         }
2072 
2073         switch (st->state) {
2074         case TCP_SEQ_STATE_OPENREQ:
2075         case TCP_SEQ_STATE_LISTENING:
2076                 rc = listening_get_next(seq, v);
2077                 if (!rc) {
2078                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2079                         st->bucket = 0;
2080                         st->offset = 0;
2081                         rc        = established_get_first(seq);
2082                 }
2083                 break;
2084         case TCP_SEQ_STATE_ESTABLISHED:
2085                 rc = established_get_next(seq, v);
2086                 break;
2087         }
2088 out:
2089         ++*pos;
2090         st->last_pos = *pos;
2091         return rc;
2092 }
2093 
2094 static void tcp_seq_stop(struct seq_file *seq, void *v)
2095 {
2096         struct tcp_iter_state *st = seq->private;
2097 
2098         switch (st->state) {
2099         case TCP_SEQ_STATE_OPENREQ:
2100                 if (v) {
2101                         struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2102                         spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2103                 }
2104         case TCP_SEQ_STATE_LISTENING:
2105                 if (v != SEQ_START_TOKEN)
2106                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2107                 break;
2108         case TCP_SEQ_STATE_ESTABLISHED:
2109                 if (v)
2110                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2111                 break;
2112         }
2113 }
2114 
2115 int tcp_seq_open(struct inode *inode, struct file *file)
2116 {
2117         struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2118         struct tcp_iter_state *s;
2119         int err;
2120 
2121         err = seq_open_net(inode, file, &afinfo->seq_ops,
2122                           sizeof(struct tcp_iter_state));
2123         if (err < 0)
2124                 return err;
2125 
2126         s = ((struct seq_file *)file->private_data)->private;
2127         s->family               = afinfo->family;
2128         s->last_pos             = 0;
2129         return 0;
2130 }
2131 EXPORT_SYMBOL(tcp_seq_open);
2132 
2133 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2134 {
2135         int rc = 0;
2136         struct proc_dir_entry *p;
2137 
2138         afinfo->seq_ops.start           = tcp_seq_start;
2139         afinfo->seq_ops.next            = tcp_seq_next;
2140         afinfo->seq_ops.stop            = tcp_seq_stop;
2141 
2142         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2143                              afinfo->seq_fops, afinfo);
2144         if (!p)
2145                 rc = -ENOMEM;
2146         return rc;
2147 }
2148 EXPORT_SYMBOL(tcp_proc_register);
2149 
2150 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2151 {
2152         remove_proc_entry(afinfo->name, net->proc_net);
2153 }
2154 EXPORT_SYMBOL(tcp_proc_unregister);
2155 
2156 static void get_openreq4(const struct request_sock *req,
2157                          struct seq_file *f, int i, kuid_t uid)
2158 {
2159         const struct inet_request_sock *ireq = inet_rsk(req);
2160         long delta = req->rsk_timer.expires - jiffies;
2161 
2162         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2163                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2164                 i,
2165                 ireq->ir_loc_addr,
2166                 ireq->ir_num,
2167                 ireq->ir_rmt_addr,
2168                 ntohs(ireq->ir_rmt_port),
2169                 TCP_SYN_RECV,
2170                 0, 0, /* could print option size, but that is af dependent. */
2171                 1,    /* timers active (only the expire timer) */
2172                 jiffies_delta_to_clock_t(delta),
2173                 req->num_timeout,
2174                 from_kuid_munged(seq_user_ns(f), uid),
2175                 0,  /* non standard timer */
2176                 0, /* open_requests have no inode */
2177                 0,
2178                 req);
2179 }
2180 
2181 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2182 {
2183         int timer_active;
2184         unsigned long timer_expires;
2185         const struct tcp_sock *tp = tcp_sk(sk);
2186         const struct inet_connection_sock *icsk = inet_csk(sk);
2187         const struct inet_sock *inet = inet_sk(sk);
2188         struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2189         __be32 dest = inet->inet_daddr;
2190         __be32 src = inet->inet_rcv_saddr;
2191         __u16 destp = ntohs(inet->inet_dport);
2192         __u16 srcp = ntohs(inet->inet_sport);
2193         int rx_queue;
2194 
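             /* Timer codes reported in /proc/net/tcp: 1 retransmit (or tail loss
              * probe) timer pending, 4 zero-window probe timer pending, 2 another
              * timer (e.g. keepalive) pending via sk_timer, 0 no timer pending.
              */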
2195         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2196             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2197             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2198                 timer_active    = 1;
2199                 timer_expires   = icsk->icsk_timeout;
2200         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2201                 timer_active    = 4;
2202                 timer_expires   = icsk->icsk_timeout;
2203         } else if (timer_pending(&sk->sk_timer)) {
2204                 timer_active    = 2;
2205                 timer_expires   = sk->sk_timer.expires;
2206         } else {
2207                 timer_active    = 0;
2208                 timer_expires = jiffies;
2209         }
2210 
2211         if (sk->sk_state == TCP_LISTEN)
2212                 rx_queue = sk->sk_ack_backlog;
2213         else
2214                 /*
2215                  * Because we don't lock the socket, we might find a transient negative value.
2216                  */
2217                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2218 
2219         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2220                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2221                 i, src, srcp, dest, destp, sk->sk_state,
2222                 tp->write_seq - tp->snd_una,
2223                 rx_queue,
2224                 timer_active,
2225                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2226                 icsk->icsk_retransmits,
2227                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2228                 icsk->icsk_probes_out,
2229                 sock_i_ino(sk),
2230                 atomic_read(&sk->sk_refcnt), sk,
2231                 jiffies_to_clock_t(icsk->icsk_rto),
2232                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2233                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2234                 tp->snd_cwnd,
2235                 sk->sk_state == TCP_LISTEN ?
2236                     (fastopenq ? fastopenq->max_qlen : 0) :
2237                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2238 }
2239 
2240 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2241                                struct seq_file *f, int i)
2242 {
2243         long delta = tw->tw_timer.expires - jiffies;
2244         __be32 dest, src;
2245         __u16 destp, srcp;
2246 
2247         dest  = tw->tw_daddr;
2248         src   = tw->tw_rcv_saddr;
2249         destp = ntohs(tw->tw_dport);
2250         srcp  = ntohs(tw->tw_sport);
2251 
2252         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2253                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2254                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2255                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2256                 atomic_read(&tw->tw_refcnt), tw);
2257 }
2258 
2259 #define TMPSZ 150
2260 
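     /* Every record written below is padded to a fixed width (TMPSZ - 1
      * characters before the trailing newline) via seq_setwidth()/seq_pad(),
      * so all lines in /proc/net/tcp have the same length.
      */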
2261 static int tcp4_seq_show(struct seq_file *seq, void *v)
2262 {
2263         struct tcp_iter_state *st;
2264         struct sock *sk = v;
2265 
2266         seq_setwidth(seq, TMPSZ - 1);
2267         if (v == SEQ_START_TOKEN) {
2268                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2269                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2270                            "inode");
2271                 goto out;
2272         }
2273         st = seq->private;
2274 
2275         switch (st->state) {
2276         case TCP_SEQ_STATE_LISTENING:
2277         case TCP_SEQ_STATE_ESTABLISHED:
2278                 if (sk->sk_state == TCP_TIME_WAIT)
2279                         get_timewait4_sock(v, seq, st->num);
2280                 else
2281                         get_tcp4_sock(v, seq, st->num);
2282                 break;
2283         case TCP_SEQ_STATE_OPENREQ:
2284                 get_openreq4(v, seq, st->num, st->uid);
2285                 break;
2286         }
2287 out:
2288         seq_pad(seq, '\n');
2289         return 0;
2290 }
2291 
2292 static const struct file_operations tcp_afinfo_seq_fops = {
2293         .owner   = THIS_MODULE,
2294         .open    = tcp_seq_open,
2295         .read    = seq_read,
2296         .llseek  = seq_lseek,
2297         .release = seq_release_net
2298 };
2299 
2300 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2301         .name           = "tcp",
2302         .family         = AF_INET,
2303         .seq_fops       = &tcp_afinfo_seq_fops,
2304         .seq_ops        = {
2305                 .show           = tcp4_seq_show,
2306         },
2307 };
2308 
2309 static int __net_init tcp4_proc_init_net(struct net *net)
2310 {
2311         return tcp_proc_register(net, &tcp4_seq_afinfo);
2312 }
2313 
2314 static void __net_exit tcp4_proc_exit_net(struct net *net)
2315 {
2316         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2317 }
2318 
2319 static struct pernet_operations tcp4_net_ops = {
2320         .init = tcp4_proc_init_net,
2321         .exit = tcp4_proc_exit_net,
2322 };
2323 
2324 int __init tcp4_proc_init(void)
2325 {
2326         return register_pernet_subsys(&tcp4_net_ops);
2327 }
2328 
2329 void tcp4_proc_exit(void)
2330 {
2331         unregister_pernet_subsys(&tcp4_net_ops);
2332 }
2333 #endif /* CONFIG_PROC_FS */
2334 
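     /* Protocol descriptor used by the IPv4 socket layer for SOCK_STREAM /
      * IPPROTO_TCP sockets; most entries point at handlers defined in this
      * file or in the generic TCP code.
      */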
2335 struct proto tcp_prot = {
2336         .name                   = "TCP",
2337         .owner                  = THIS_MODULE,
2338         .close                  = tcp_close,
2339         .connect                = tcp_v4_connect,
2340         .disconnect             = tcp_disconnect,
2341         .accept                 = inet_csk_accept,
2342         .ioctl                  = tcp_ioctl,
2343         .init                   = tcp_v4_init_sock,
2344         .destroy                = tcp_v4_destroy_sock,
2345         .shutdown               = tcp_shutdown,
2346         .setsockopt             = tcp_setsockopt,
2347         .getsockopt             = tcp_getsockopt,
2348         .recvmsg                = tcp_recvmsg,
2349         .sendmsg                = tcp_sendmsg,
2350         .sendpage               = tcp_sendpage,
2351         .backlog_rcv            = tcp_v4_do_rcv,
2352         .release_cb             = tcp_release_cb,
2353         .hash                   = inet_hash,
2354         .unhash                 = inet_unhash,
2355         .get_port               = inet_csk_get_port,
2356         .enter_memory_pressure  = tcp_enter_memory_pressure,
2357         .stream_memory_free     = tcp_stream_memory_free,
2358         .sockets_allocated      = &tcp_sockets_allocated,
2359         .orphan_count           = &tcp_orphan_count,
2360         .memory_allocated       = &tcp_memory_allocated,
2361         .memory_pressure        = &tcp_memory_pressure,
2362         .sysctl_mem             = sysctl_tcp_mem,
2363         .sysctl_wmem            = sysctl_tcp_wmem,
2364         .sysctl_rmem            = sysctl_tcp_rmem,
2365         .max_header             = MAX_TCP_HEADER,
2366         .obj_size               = sizeof(struct tcp_sock),
2367         .slab_flags             = SLAB_DESTROY_BY_RCU,
2368         .twsk_prot              = &tcp_timewait_sock_ops,
2369         .rsk_prot               = &tcp_request_sock_ops,
2370         .h.hashinfo             = &tcp_hashinfo,
2371         .no_autobind            = true,
2372 #ifdef CONFIG_COMPAT
2373         .compat_setsockopt      = compat_tcp_setsockopt,
2374         .compat_getsockopt      = compat_tcp_getsockopt,
2375 #endif
2376 #ifdef CONFIG_MEMCG_KMEM
2377         .init_cgroup            = tcp_init_cgroup,
2378         .destroy_cgroup         = tcp_destroy_cgroup,
2379         .proto_cgroup           = tcp_proto_cgroup,
2380 #endif
2381 };
2382 EXPORT_SYMBOL(tcp_prot);
2383 
2384 static void __net_exit tcp_sk_exit(struct net *net)
2385 {
2386         int cpu;
2387 
2388         for_each_possible_cpu(cpu)
2389                 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2390         free_percpu(net->ipv4.tcp_sk);
2391 }
2392 
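     /* Per-netns setup: create one raw control socket per possible CPU (used,
      * for example, by tcp_v4_send_reset() to emit RSTs and ACKs on behalf of
      * sockets we do not own) and initialise the per-netns TCP sysctl defaults.
      */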
2393 static int __net_init tcp_sk_init(struct net *net)
2394 {
2395         int res, cpu;
2396 
2397         net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2398         if (!net->ipv4.tcp_sk)
2399                 return -ENOMEM;
2400 
2401         for_each_possible_cpu(cpu) {
2402                 struct sock *sk;
2403 
2404                 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2405                                            IPPROTO_TCP, net);
2406                 if (res)
2407                         goto fail;
2408                 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2409         }
2410 
2411         net->ipv4.sysctl_tcp_ecn = 2;
2412         net->ipv4.sysctl_tcp_ecn_fallback = 1;
2413 
2414         net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2415         net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2416         net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2417 
2418         return 0;
2419 fail:
2420         tcp_sk_exit(net);
2421 
2422         return res;
2423 }
2424 
2425 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2426 {
2427         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2428 }
2429 
2430 static struct pernet_operations __net_initdata tcp_sk_ops = {
2431        .init       = tcp_sk_init,
2432        .exit       = tcp_sk_exit,
2433        .exit_batch = tcp_sk_exit_batch,
2434 };
2435 
2436 void __init tcp_v4_init(void)
2437 {
2438         inet_hashinfo_init(&tcp_hashinfo);
2439         if (register_pernet_subsys(&tcp_sk_ops))
2440                 panic("Failed to create the TCP control socket.\n");
2441 }
2442 
