~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/ipv6/tcp_ipv6.c

Version: ~ [ linux-5.13-rc5 ] ~ [ linux-5.12.9 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.42 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.124 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.193 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.235 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.271 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.271 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.18.140 ] ~ [ linux-3.16.85 ] ~ [ linux-3.14.79 ] ~ [ linux-3.12.74 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *      TCP over IPv6
  3  *      Linux INET6 implementation
  4  *
  5  *      Authors:
  6  *      Pedro Roque             <roque@di.fc.ul.pt>
  7  *
  8  *      Based on:
  9  *      linux/net/ipv4/tcp.c
 10  *      linux/net/ipv4/tcp_input.c
 11  *      linux/net/ipv4/tcp_output.c
 12  *
 13  *      Fixes:
 14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 17  *                                      a single port at the same time.
 18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
 19  *
 20  *      This program is free software; you can redistribute it and/or
 21  *      modify it under the terms of the GNU General Public License
 22  *      as published by the Free Software Foundation; either version
 23  *      2 of the License, or (at your option) any later version.
 24  */
 25 
 26 #include <linux/bottom_half.h>
 27 #include <linux/module.h>
 28 #include <linux/errno.h>
 29 #include <linux/types.h>
 30 #include <linux/socket.h>
 31 #include <linux/sockios.h>
 32 #include <linux/net.h>
 33 #include <linux/jiffies.h>
 34 #include <linux/in.h>
 35 #include <linux/in6.h>
 36 #include <linux/netdevice.h>
 37 #include <linux/init.h>
 38 #include <linux/jhash.h>
 39 #include <linux/ipsec.h>
 40 #include <linux/times.h>
 41 #include <linux/slab.h>
 42 #include <linux/uaccess.h>
 43 #include <linux/ipv6.h>
 44 #include <linux/icmpv6.h>
 45 #include <linux/random.h>
 46 
 47 #include <net/tcp.h>
 48 #include <net/ndisc.h>
 49 #include <net/inet6_hashtables.h>
 50 #include <net/inet6_connection_sock.h>
 51 #include <net/ipv6.h>
 52 #include <net/transp_v6.h>
 53 #include <net/addrconf.h>
 54 #include <net/ip6_route.h>
 55 #include <net/ip6_checksum.h>
 56 #include <net/inet_ecn.h>
 57 #include <net/protocol.h>
 58 #include <net/xfrm.h>
 59 #include <net/snmp.h>
 60 #include <net/dsfield.h>
 61 #include <net/timewait_sock.h>
 62 #include <net/inet_common.h>
 63 #include <net/secure_seq.h>
 64 #include <net/busy_poll.h>
 65 
 66 #include <linux/proc_fs.h>
 67 #include <linux/seq_file.h>
 68 
 69 #include <crypto/hash.h>
 70 #include <linux/scatterlist.h>
 71 
 72 #include <trace/events/tcp.h>
 73 
 74 static void     tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
 75 static void     tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 76                                       struct request_sock *req);
 77 
 78 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 79 
 80 static const struct inet_connection_sock_af_ops ipv6_mapped;
 81 static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
/* MD5-sig support compiled out: the lookup stub always reports "no key",
 * letting callers skip every TCP-MD5 code path without extra #ifdefs.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
 92 
 93 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 94 {
 95         struct dst_entry *dst = skb_dst(skb);
 96 
 97         if (dst && dst_hold_safe(dst)) {
 98                 const struct rt6_info *rt = (const struct rt6_info *)dst;
 99 
100                 sk->sk_rx_dst = dst;
101                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102                 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
103         }
104 }
105 
106 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
107 {
108         return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
109                                 ipv6_hdr(skb)->saddr.s6_addr32,
110                                 tcp_hdr(skb)->dest,
111                                 tcp_hdr(skb)->source);
112 }
113 
114 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
115 {
116         return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
117                                    ipv6_hdr(skb)->saddr.s6_addr32);
118 }
119 
/* Active open (connect()) for an AF_INET6 TCP socket.
 *
 * Validates the destination address, handles flow labels, maps a
 * v4-mapped destination onto the IPv4 connect path, otherwise resolves
 * an IPv6 route, fixes the source address, enters SYN_SENT and sends
 * the initial SYN (unless deferred by TCP Fast Open).
 *
 * Returns 0 on success or a negative errno; on failure the socket is
 * left unconnected (dport cleared, state reset to CLOSE if it had been
 * advanced).  Called with the socket locked.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	/* IPV6_FLOWINFO_SEND enabled: honor the caller-supplied flow label,
	 * but only if the label was previously obtained via flowlabel mgmt.
	 */
	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Reconnecting to a different peer: stale PAWS/timestamp state
	 * from the previous connection must not carry over.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket's af_ops to the mapped (IPv4) variants
		 * and delegate to the IPv4 connect path.
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* IPv4 connect failed: restore the IPv6 callbacks. */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	/* No source address yet: adopt the one the route lookup chose. */
	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	/* Account for extension headers carried by the tx options. */
	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	/* TFO may defer the actual SYN until sendmsg() supplies data. */
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
319 
320 static void tcp_v6_mtu_reduced(struct sock *sk)
321 {
322         struct dst_entry *dst;
323 
324         if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
325                 return;
326 
327         dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
328         if (!dst)
329                 return;
330 
331         if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
332                 tcp_sync_mss(sk, dst_mtu(dst));
333                 tcp_simple_retransmit(sk);
334         }
335 }
336 
/* ICMPv6 error handler for TCP: called when an ICMPv6 error message
 * quotes one of our TCP segments.  Locates the owning socket and reacts
 * according to the error type: route redirect, packet-too-big (PMTU),
 * or a hard/soft protocol error.  Runs in softirq context; takes the
 * socket's bottom-half lock and drops the lookup reference on exit.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	/* TIME_WAIT sockets ignore ICMP errors entirely. */
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	/* Request sockets (SYN queue) have a dedicated handler; it also
	 * consumes the socket reference.
	 */
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* IP_MINTTL-style defense: drop errors arriving from too far away. */
	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	/* The quoted sequence must fall inside our unacked window,
	 * otherwise treat the ICMP as spoofed/stale.
	 */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		/* Only update the route when we own the socket. */
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		/* Apply now if we own the socket, otherwise defer the MSS
		 * update to release_sock() via the tsq flag (holding a ref).
		 */
		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* Established connection: surface the error only if the app asked
	 * for it (IPV6_RECVERR); otherwise record it as a soft error.
	 */
	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
456 
457 
/* Build and transmit a SYN-ACK for a pending connection request.
 *
 * @dst may be a pre-resolved route (e.g. from the syncookie path); when
 * NULL a route is looked up from the request.  @synack_type tells
 * tcp_make_synack() whether this is a normal, fastopen, or cookie ack.
 * Returns 0 / NET_XMIT_* on success, negative errno on failure.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* Reflect the peer's flow label if IPV6_FLOWLABEL_MGR
		 * reflection is enabled and we kept the original SYN.
		 */
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		/* np->opt is RCU-protected; the request's own options,
		 * if present, take precedence.
		 */
		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
498 
499 
500 static void tcp_v6_reqsk_destructor(struct request_sock *req)
501 {
502         kfree(inet_rsk(req)->ipv6_opt);
503         kfree_skb(inet_rsk(req)->pktopts);
504 }
505 
506 #ifdef CONFIG_TCP_MD5SIG
507 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
508                                                    const struct in6_addr *addr)
509 {
510         return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
511 }
512 
513 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
514                                                 const struct sock *addr_sk)
515 {
516         return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
517 }
518 
519 static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
520                                  char __user *optval, int optlen)
521 {
522         struct tcp_md5sig cmd;
523         struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
524         u8 prefixlen;
525 
526         if (optlen < sizeof(cmd))
527                 return -EINVAL;
528 
529         if (copy_from_user(&cmd, optval, sizeof(cmd)))
530                 return -EFAULT;
531 
532         if (sin6->sin6_family != AF_INET6)
533                 return -EINVAL;
534 
535         if (optname == TCP_MD5SIG_EXT &&
536             cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
537                 prefixlen = cmd.tcpm_prefixlen;
538                 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
539                                         prefixlen > 32))
540                         return -EINVAL;
541         } else {
542                 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
543         }
544 
545         if (!cmd.tcpm_keylen) {
546                 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
547                         return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
548                                               AF_INET, prefixlen);
549                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
550                                       AF_INET6, prefixlen);
551         }
552 
553         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
554                 return -EINVAL;
555 
556         if (ipv6_addr_v4mapped(&sin6->sin6_addr))
557                 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
558                                       AF_INET, prefixlen, cmd.tcpm_key,
559                                       cmd.tcpm_keylen, GFP_KERNEL);
560 
561         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
562                               AF_INET6, prefixlen, cmd.tcpm_key,
563                               cmd.tcpm_keylen, GFP_KERNEL);
564 }
565 
/* Feed the IPv6 TCP pseudo-header followed by the TCP header (with its
 * checksum field zeroed, per RFC 2385) into the per-CPU MD5 request.
 * @nbytes is the TCP length that goes into the pseudo-header.
 * Returns 0 on success or a crypto-layer error code.
 */
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	/* 2. Copy of the TCP header with check cleared: the signature is
	 * computed as if the checksum were zero.
	 */
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
591 
/* Compute the 16-byte TCP-MD5 digest over pseudo-header + TCP header
 * (header-only variant, used for RST/ACK replies built from a received
 * segment).  Writes the digest into @md5_hash.  Returns 0 on success;
 * on any crypto failure, zeroes the digest and returns 1.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	/* th->doff << 2 = full header length including options. */
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
623 
/* Compute the TCP-MD5 digest over pseudo-header, TCP header and the
 * segment payload of @skb.  Addresses come from @sk when available
 * (established/request socket), otherwise from the packet itself.
 * Writes the 16-byte digest into @md5_hash; returns 0 on success, or
 * zeroes the digest and returns 1 on any crypto failure.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	/* skb->len covers header + payload for the pseudo-header length;
	 * the payload data is hashed starting past the TCP options.
	 */
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
670 
671 #endif
672 
/* Validate the TCP-MD5 signature (RFC 2385) on an inbound segment.
 *
 * Returns true if the segment must be DROPPED: a key is configured but
 * the segment carries no signature, a signature arrived for a peer with
 * no key, or the signature does not verify.  Returns false (accept)
 * when neither side uses MD5 or the signature matches.  Without
 * CONFIG_TCP_MD5SIG this always accepts.
 */
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
717 
/* init_req callback: populate the IPv6-specific fields of a freshly
 * allocated request sock from the incoming SYN.  May retain the SYN skb
 * (pktopts) when the listener asked for rx options or the options need
 * to be echoed to the child socket.
 */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Keep the SYN around (with a reference) only for genuine SYNs
	 * (tcp_tw_isn == 0), when any rx-option delivery is requested.
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
742 
743 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
744                                           struct flowi *fl,
745                                           const struct request_sock *req)
746 {
747         return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
748 }
749 
/* request_sock operations for AF_INET6: SYN-ACK retransmit, ACK/RST
 * replies and cleanup callbacks used while a connection request sits in
 * the listener's SYN queue.
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
759 
/* TCP-specific request_sock callbacks for IPv6: ISN/timestamp-offset
 * generation, routing, MD5 and syncookie hooks, and SYN-ACK transmit.
 */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	/* Worst-case MSS: minimum IPv6 MTU minus the fixed headers. */
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
776 
/* Build and transmit a stand-alone control segment (RST when @rst != 0,
 * otherwise a bare ACK) in reply to @skb, swapping its addresses/ports.
 * @sk may be NULL (e.g. RST for a packet that matched no socket); the
 * per-netns control socket is used for the actual transmit.
 * @key, when non-NULL, appends a TCP-MD5 signature option (RFC 2385).
 * Runs in softirq context: allocation is GFP_ATOMIC and failures are
 * silently dropped.
 */
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	/* Grow the header to hold the TCP options we will emit below */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* A RST answering an ACK carries no ACK bit of its own */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* Sign with reply direction: our saddr is skb's daddr */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Link-local destinations require an explicit output interface */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Always pass the control socket to ip6_dst_lookup_flow(), even
	 * for a RST: the underlying function uses it to retrieve the
	 * network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	/* No route for the reply: drop the segment we built */
	kfree_skb(buff);
}
881 
/* Send a RST in reply to @skb. @sk may be NULL when no socket matched.
 * RFC 793: never reset a RST. When MD5 is configured and the segment
 * carries an MD5 option but we have no matching socket, fall back to a
 * listener lookup so a signed RST can still be produced; an unverified
 * hash suppresses the RST entirely.
 */
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif = 0;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	/* rcu_read_lock() protects the key lookups; every early exit below
	 * must go through "out" to drop it.
	 */
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, NULL, 0,
					   &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb),
					   tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	/* RFC 793: if the segment had an ACK, reset with its ack_seq as our
	 * seq; otherwise ACK everything the segment occupied.
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
956 
/* Thin wrapper around tcp_v6_send_response() for a pure ACK (rst == 0). */
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
                            u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key, u8 tclass,
                            __be32 label)
{
        tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
                             tclass, label);
}
965 
966 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
967 {
968         struct inet_timewait_sock *tw = inet_twsk(sk);
969         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
970 
971         tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
972                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
973                         tcp_time_stamp_raw() + tcptw->tw_ts_offset,
974                         tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
975                         tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
976 
977         inet_twsk_put(tw);
978 }
979 
980 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
981                                   struct request_sock *req)
982 {
983         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
984          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
985          */
986         /* RFC 7323 2.3
987          * The window field (SEG.WND) of every outgoing segment, with the
988          * exception of <SYN> segments, MUST be right-shifted by
989          * Rcv.Wind.Shift bits:
990          */
991         tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
992                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
993                         tcp_rsk(req)->rcv_nxt,
994                         req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
995                         tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
996                         req->ts_recent, sk->sk_bound_dev_if,
997                         tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
998                         0, 0);
999 }
1000 
1001 
/* On a listener, a non-SYN segment may carry a SYN cookie in its ACK;
 * validate it and return the newly minted child socket on success.
 * Returns @sk unchanged when cookies are compiled out or the segment
 * is a SYN.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (!tcp_hdr(skb)->syn)
		return cookie_v6_check(sk, skb);
#endif
	return sk;
}
1012 
1013 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1014 {
1015         if (skb->protocol == htons(ETH_P_IP))
1016                 return tcp_v4_conn_request(sk, skb);
1017 
1018         if (!ipv6_unicast_destination(skb))
1019                 goto drop;
1020 
1021         return tcp_conn_request(&tcp6_request_sock_ops,
1022                                 &tcp_request_sock_ipv6_ops, sk, skb);
1023 
1024 drop:
1025         tcp_listendrop(sk);
1026         return 0; /* don't send reset */
1027 }
1028 
/* Undo tcp_v6_fill_cb(): move the saved IPv6 control-block data back to
 * the front of skb->cb.
 */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
        /* We need to move header back to the beginning if xfrm6_policy_check()
         * and tcp_v6_fill_cb() are going to be called again.
         * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
         */
        memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
                sizeof(struct inet6_skb_parm));
}
1038 
/* Create the child (ESTABLISHED-to-be) socket once the 3WHS completes.
 * Called from tcp_check_req()/cookie paths. Returns the new socket, or
 * NULL on failure (accept queue full, no route, allocation failure).
 * *own_req tells the caller whether we won the hash-insert race and thus
 * own @req_unhash.
 */
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped: an IPv4 SYN on a dual-stack listener. Let the
		 * IPv4 code build the child, then graft IPv6 state onto it.
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		/* From here on, the child behaves as a v4-mapped socket */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* Pointers copied by the memcpy above belong to the parent;
		 * clear them so we don't share or double-free.
		 */
		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt         = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
1246 
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
/* Deliver one segment to @sk (from softirq or backlog). Returns 0, or
 * non-zero to ask the caller to drop the socket reference paths; also
 * handles the IPV6_PKTOPTIONS latching via opt_skb.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			/* Drop the cached rx dst if the incoming interface or
			 * the route cookie no longer match.
			 */
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			/* Cookie check minted a child: feed it the segment */
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			/* Latch this skb as the current pktoptions; free the
			 * previously latched one (now in opt_skb).
			 */
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
1382 
/* Populate TCP_SKB_CB() from the TCP header, preserving the IPv6
 * control block by relocating it first (see comment below).
 */
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	/* end_seq accounts for SYN/FIN flags as one sequence unit each */
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
1406 
/* Main IPv6 TCP receive entry point (softirq). Validates the header and
 * checksum, looks up the owning socket and dispatches by socket state
 * (NEW_SYN_RECV, TIME_WAIT, listen, established). Careful reference
 * discipline: @refcounted tracks whether sk holds a reference we must
 * drop, and request sockets are released via reqsk_put() on every path.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* pskb_may_pull() may have reallocated the header */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			/* Listener went away; drop the req and retry lookup */
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			/* tcp_filter() may have trimmed/reallocated skb */
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false);
		}
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	/* IP_MINTTL-style protection (RFC 5082 GTSM) for IPv6 */
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	/* Process directly if the owner isn't holding the socket lock,
	 * otherwise queue to the backlog for later processing.
	 */
	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new SYN may legitimately reuse a TIME-WAIT 4-tuple:
		 * find a listener, retire the timewait socket, and
		 * reprocess the segment against the listener.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1607 
/* Early demux: before routing, try to match the packet to an established
 * socket so its cached rx dst can be attached, skipping a route lookup.
 * Best-effort only; silently returns when no match.
 */
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			/* Only reuse the cached dst if it is still valid and
			 * was learned on the same incoming interface.
			 */
			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
1645 
/* TIME_WAIT minisock parameters for IPv6 TCP: object size plus the
 * generic TCP uniqueness check and destructor.
 */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1651 
/* Address-family specific connection ops for TCP sockets running over
 * native IPv6 (installed by tcp_v6_init_sock() below).
 */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header    = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1671 
#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 signature ops for native IPv6: key lookup, option parsing and
 * skb hashing over IPv6 addresses.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1679 
/*
 *	TCP over IPv4 via INET6 API
 */
/* Used when an AF_INET6 TCP socket actually speaks IPv4 (v4-mapped
 * address): transmit, checksum and header-length ops come from the IPv4
 * stack, while socket-option and sockaddr handling remain IPv6.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header    = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
1701 
#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 signature ops for v4-mapped sockets: IPv4 key lookup and
 * hashing, but IPv6 setsockopt parsing (the socket is still AF_INET6).
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1709 
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
/* Per-socket initializer for AF_INET6 TCP sockets: run the generic TCP
 * setup, then install the IPv6-specific af_ops (and MD5 ops when
 * configured).  Always returns 0.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	/* Default to native IPv6 ops (cf. ipv6_mapped above for the
	 * v4-mapped case).
	 */
	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
1727 
/* Socket teardown: run the generic/IPv4 TCP destructor first, then
 * release the IPv6 per-socket state.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1733 
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
/* Format one SYN_RECV request socket as a row of /proc/net/tcp6.
 * @i is the row index printed in the "sl" column.
 */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	/* Remaining time on the request timer; clamp to 0 if it already
	 * expired (the subtraction can go negative).
	 */
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1767 
1768 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1769 {
1770         const struct in6_addr *dest, *src;
1771         __u16 destp, srcp;
1772         int timer_active;
1773         unsigned long timer_expires;
1774         const struct inet_sock *inet = inet_sk(sp);
1775         const struct tcp_sock *tp = tcp_sk(sp);
1776         const struct inet_connection_sock *icsk = inet_csk(sp);
1777         const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1778         int rx_queue;
1779         int state;
1780 
1781         dest  = &sp->sk_v6_daddr;
1782         src   = &sp->sk_v6_rcv_saddr;
1783         destp = ntohs(inet->inet_dport);
1784         srcp  = ntohs(inet->inet_sport);
1785 
1786         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1787             icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
1788             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1789                 timer_active    = 1;
1790                 timer_expires   = icsk->icsk_timeout;
1791         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1792                 timer_active    = 4;
1793                 timer_expires   = icsk->icsk_timeout;
1794         } else if (timer_pending(&sp->sk_timer)) {
1795                 timer_active    = 2;
1796                 timer_expires   = sp->sk_timer.expires;
1797         } else {
1798                 timer_active    = 0;
1799                 timer_expires = jiffies;
1800         }
1801 
1802         state = inet_sk_state_load(sp);
1803         if (state == TCP_LISTEN)
1804                 rx_queue = sp->sk_ack_backlog;
1805         else
1806                 /* Because we don't lock the socket,
1807                  * we might find a transient negative value.
1808                  */
1809                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1810 
1811         seq_printf(seq,
1812                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1813                    "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1814                    i,
1815                    src->s6_addr32[0], src->s6_addr32[1],
1816                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1817                    dest->s6_addr32[0], dest->s6_addr32[1],
1818                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1819                    state,
1820                    tp->write_seq - tp->snd_una,
1821                    rx_queue,
1822                    timer_active,
1823                    jiffies_delta_to_clock_t(timer_expires - jiffies),
1824                    icsk->icsk_retransmits,
1825                    from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1826                    icsk->icsk_probes_out,
1827                    sock_i_ino(sp),
1828                    refcount_read(&sp->sk_refcnt), sp,
1829                    jiffies_to_clock_t(icsk->icsk_rto),
1830                    jiffies_to_clock_t(icsk->icsk_ack.ato),
1831                    (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1832                    tp->snd_cwnd,
1833                    state == TCP_LISTEN ?
1834                         fastopenq->max_qlen :
1835                         (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1836                    );
1837 }
1838 
1839 static void get_timewait6_sock(struct seq_file *seq,
1840                                struct inet_timewait_sock *tw, int i)
1841 {
1842         long delta = tw->tw_timer.expires - jiffies;
1843         const struct in6_addr *dest, *src;
1844         __u16 destp, srcp;
1845 
1846         dest = &tw->tw_v6_daddr;
1847         src  = &tw->tw_v6_rcv_saddr;
1848         destp = ntohs(tw->tw_dport);
1849         srcp  = ntohs(tw->tw_sport);
1850 
1851         seq_printf(seq,
1852                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1853                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1854                    i,
1855                    src->s6_addr32[0], src->s6_addr32[1],
1856                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1857                    dest->s6_addr32[0], dest->s6_addr32[1],
1858                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1859                    tw->tw_substate, 0, 0,
1860                    3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1861                    refcount_read(&tw->tw_refcnt), tw);
1862 }
1863 
1864 static int tcp6_seq_show(struct seq_file *seq, void *v)
1865 {
1866         struct tcp_iter_state *st;
1867         struct sock *sk = v;
1868 
1869         if (v == SEQ_START_TOKEN) {
1870                 seq_puts(seq,
1871                          "  sl  "
1872                          "local_address                         "
1873                          "remote_address                        "
1874                          "st tx_queue rx_queue tr tm->when retrnsmt"
1875                          "   uid  timeout inode\n");
1876                 goto out;
1877         }
1878         st = seq->private;
1879 
1880         if (sk->sk_state == TCP_TIME_WAIT)
1881                 get_timewait6_sock(seq, v, st->num);
1882         else if (sk->sk_state == TCP_NEW_SYN_RECV)
1883                 get_openreq6(seq, v, st->num);
1884         else
1885                 get_tcp6_sock(seq, v, st->num);
1886 out:
1887         return 0;
1888 }
1889 
/* file_operations for /proc/net/tcp6; iteration itself is driven by the
 * generic tcp_seq_open()/seq_file machinery.
 */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
1896 
/* AF_INET6 descriptor for the shared TCP /proc infrastructure: names the
 * file "tcp6" and plugs in tcp6_seq_show() as the row renderer.
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
1905 
/* Register /proc/net/tcp6 for a network namespace.
 * Returns 0 on success or the error from tcp_proc_register().
 */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1910 
/* Remove /proc/net/tcp6 from a network namespace. */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
1916 
/* struct proto for AF_INET6 TCP sockets: mostly the generic TCP
 * handlers, plus the IPv6-specific entry points defined in this file
 * (connect, init/destroy, backlog_rcv) and IPv6 hashing/object sizes.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
1962 
/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
/* Layer-3 protocol hook for IPPROTO_TCP on the IPv6 input path:
 * tcp_v6_rcv() handles packets, tcp_v6_err() handles ICMPv6 errors.
 */
static struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.early_demux_handler =	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1973 
/* socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP) switch entry binding
 * tcpv6_prot to the generic inet6 stream socket ops.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
1982 
/* Per-netns init: create the raw kernel control socket stored in
 * net->ipv6.tcp_sk.  Returns 0 or the inet_ctl_sock_create() error.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
1988 
/* Per-netns exit: destroy the control socket created in tcpv6_net_init(). */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
1993 
/* Batched netns teardown: purge remaining IPv6 TIME_WAIT sockets from
 * the global TCP hash once for the whole batch of dying namespaces.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}
1998 
/* Per-network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
2004 
/* Boot-time initialization of TCPv6: register the inet6 protocol
 * handler, the protosw entry, and the pernet ops — unwinding in reverse
 * order on failure.  Returns 0 on success or a negative errno.
 */
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

	/* Error unwind: undo the registrations that succeeded, newest
	 * first, then fall back to the common return path.
	 */
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}
2030 
/* Module/teardown counterpart of tcpv6_init(): unregister everything in
 * reverse order of registration.
 */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}
2037 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp