~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/ipv6/tcp_ipv6.c

Version: ~ [ linux-5.12 ] ~ [ linux-5.11.16 ] ~ [ linux-5.10.32 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.114 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.188 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.231 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.267 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.267 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.18.140 ] ~ [ linux-3.16.85 ] ~ [ linux-3.14.79 ] ~ [ linux-3.12.74 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *      TCP over IPv6
  3  *      Linux INET6 implementation
  4  *
  5  *      Authors:
  6  *      Pedro Roque             <roque@di.fc.ul.pt>
  7  *
  8  *      Based on:
  9  *      linux/net/ipv4/tcp.c
 10  *      linux/net/ipv4/tcp_input.c
 11  *      linux/net/ipv4/tcp_output.c
 12  *
 13  *      Fixes:
 14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 17  *                                      a single port at the same time.
 18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
 19  *
 20  *      This program is free software; you can redistribute it and/or
 21  *      modify it under the terms of the GNU General Public License
 22  *      as published by the Free Software Foundation; either version
 23  *      2 of the License, or (at your option) any later version.
 24  */
 25 
 26 #include <linux/bottom_half.h>
 27 #include <linux/module.h>
 28 #include <linux/errno.h>
 29 #include <linux/types.h>
 30 #include <linux/socket.h>
 31 #include <linux/sockios.h>
 32 #include <linux/net.h>
 33 #include <linux/jiffies.h>
 34 #include <linux/in.h>
 35 #include <linux/in6.h>
 36 #include <linux/netdevice.h>
 37 #include <linux/init.h>
 38 #include <linux/jhash.h>
 39 #include <linux/ipsec.h>
 40 #include <linux/times.h>
 41 #include <linux/slab.h>
 42 #include <linux/uaccess.h>
 43 #include <linux/ipv6.h>
 44 #include <linux/icmpv6.h>
 45 #include <linux/random.h>
 46 
 47 #include <net/tcp.h>
 48 #include <net/ndisc.h>
 49 #include <net/inet6_hashtables.h>
 50 #include <net/inet6_connection_sock.h>
 51 #include <net/ipv6.h>
 52 #include <net/transp_v6.h>
 53 #include <net/addrconf.h>
 54 #include <net/ip6_route.h>
 55 #include <net/ip6_checksum.h>
 56 #include <net/inet_ecn.h>
 57 #include <net/protocol.h>
 58 #include <net/xfrm.h>
 59 #include <net/snmp.h>
 60 #include <net/dsfield.h>
 61 #include <net/timewait_sock.h>
 62 #include <net/inet_common.h>
 63 #include <net/secure_seq.h>
 64 #include <net/busy_poll.h>
 65 
 66 #include <linux/proc_fs.h>
 67 #include <linux/seq_file.h>
 68 
 69 #include <crypto/hash.h>
 70 #include <linux/scatterlist.h>
 71 
 72 #include <trace/events/tcp.h>
 73 
 74 static void     tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
 75 static void     tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 76                                       struct request_sock *req);
 77 
 78 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 79 
/* AF-specific operation tables defined later in this file.  ipv6_mapped
 * is installed on a socket that connects to a v4-mapped address.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
/* Stub used when TCP-MD5 support is compiled out: no key ever matches,
 * so callers (e.g. tcp_v6_inbound_md5_hash) see "no key configured".
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
 92 
/* Cache the inbound route on the socket for the receive fast path.
 * Only done when a reference can actually be taken (dst_hold_safe());
 * the incoming ifindex and the route cookie are stored alongside so
 * the cached dst can be validated before reuse.
 */
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
105 
106 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
107 {
108         return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
109                                 ipv6_hdr(skb)->saddr.s6_addr32,
110                                 tcp_hdr(skb)->dest,
111                                 tcp_hdr(skb)->source);
112 }
113 
114 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
115 {
116         return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
117                                    ipv6_hdr(skb)->saddr.s6_addr32);
118 }
119 
/* Connect an IPv6 TCP socket to the remote address in @uaddr.
 *
 * Validates the requested flow label, applies the BSD "connect to any
 * means loopback" convention, enforces scope-id rules for link-local
 * destinations, and hands v4-mapped destinations off to
 * tcp_v4_connect().  On the native IPv6 path it routes the flow, binds
 * a local port (inet6_hash_connect), picks the ISN/timestamp offset
 * and sends the SYN.  Returns 0 or a negative errno.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		/* Caller supplied flow info: a non-zero flow label must
		 * refer to an existing flow-label lease on this socket.
		 */
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		/* Different peer than before: forget the cached
		 * timestamp state and restart the sequence space.
		 */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket onto the IPv4 operation tables and
		 * hand off to tcp_v4_connect(); on failure restore the
		 * IPv6 ops and ext-header length so the socket remains
		 * usable as an IPv6 socket.
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		/* Source address was chosen by the routing lookup. */
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		/* write_seq may have been preserved from a previous
		 * connection to the same peer (see reset above).
		 */
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	/* Fast Open may defer the actual SYN until sendmsg(). */
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
320 
/* PMTU-reduction handler: re-resolve the route with the new path MTU
 * (stashed in tp->mtu_info by tcp_v6_err()), and if our cached PMTU
 * was larger, shrink the MSS and retransmit what no longer fits.
 * No-op for listening or closed sockets.
 */
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
337 
/* ICMPv6 error handler for TCP.
 *
 * Locates the established socket matching the TCP header embedded at
 * @offset in the quoted packet, then dispatches on @type: NDISC
 * redirects update the cached route, PKT_TOOBIG feeds PMTU discovery
 * (deferred via TCP_MTU_REDUCED_DEFERRED when the socket is owned by
 * user context), and everything else is converted to an errno via
 * icmpv6_err_convert() and reported on the socket.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		/* Error is for a request socket; tcp_req_err() also
		 * drops the reference taken by the lookup above.
		 */
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* Drop spoofed-looking errors per the MINTTL socket option. */
	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		/* Quoted sequence number is outside our send window. */
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			/* Extra hold for the deferred work; released by
			 * the release_cb path.
			 */
			sock_hold(sk);
		goto out;
	}


	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
457 
458 
/* Build and transmit a SYN-ACK for request @req.
 *
 * Uses the caller-supplied @dst if given, otherwise routes one via
 * inet6_csk_route_req().  Returns a net_xmit_* code from ip6_xmit()
 * or -ENOMEM when no route/skb could be obtained.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* Reflect the peer's flow label when IPV6_FLOWINFO
		 * reflection was requested and we kept the SYN skb.
		 */
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		/* Per-request options override the socket's; np->opt is
		 * RCU-protected, hence the read-side section.
		 */
		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
499 
500 
501 static void tcp_v6_reqsk_destructor(struct request_sock *req)
502 {
503         kfree(inet_rsk(req)->ipv6_opt);
504         kfree_skb(inet_rsk(req)->pktopts);
505 }
506 
507 #ifdef CONFIG_TCP_MD5SIG
/* Look up the MD5 key configured on @sk for peer address @addr,
 * searching the AF_INET6 key family.  Returns NULL when none is set.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}
513 
514 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
515                                                 const struct sock *addr_sk)
516 {
517         return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
518 }
519 
/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler: add or delete the
 * MD5 key for a peer.  A zero tcpm_keylen deletes the key.  v4-mapped
 * addresses are stored as AF_INET keys using the low 32 address bits.
 * Returns 0 or a negative errno.
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		/* Caller-supplied prefix length, bounded by the family
		 * (max 32 for v4-mapped, 128 for native IPv6).
		 */
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		/* Default: the key matches the full address. */
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		/* Empty key means delete. */
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}
566 
/* Feed the TCP-MD5 signature prefix into the running ahash request:
 * the IPv6 pseudo-header (saddr, daddr, protocol, segment length of
 * @nbytes) followed by the TCP header with its checksum field zeroed.
 * Returns the crypto_ahash_update() result (0 on success).
 */
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	/* 2. TCP header with check == 0, placed in the scratch buffer
	 * directly after the pseudo-header so one scatterlist entry
	 * covers both.
	 */
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
592 
/* Compute the MD5 signature over pseudo-header + TCP header only (no
 * payload); used when building reply segments from raw headers.
 * Writes 16 bytes to @md5_hash.  Returns 0 on success; on any failure
 * returns 1 with @md5_hash zeroed.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	/* th->doff is in 32-bit words; << 2 gives the header length. */
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
624 
/* Compute the MD5 signature for a full segment: pseudo-header, TCP
 * header, and payload taken from @skb.  Addresses come from @sk when
 * available (established/request sockets), otherwise from the skb's
 * IPv6 header.  Writes 16 bytes to @md5_hash; returns 0 on success,
 * 1 on failure with @md5_hash zeroed.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	/* Headers first, then payload starting past the TCP header. */
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
671 
672 #endif
673 
/* Validate the TCP-MD5 option on an inbound segment against the key
 * configured for the sender's address.  Returns true when the packet
 * must be dropped: key configured but option absent, option present
 * but no key, or signature mismatch.  Always false when MD5 support
 * is compiled out.
 */
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
718 
/* Initialize the IPv6 part of a request sock from an incoming SYN:
 * record the address pair, pin the ingress interface for link-local
 * peers, and — when the listener requested IPv6 rx options — keep a
 * reference to the SYN skb so those options can be replayed to the
 * child socket later.
 */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Stash the SYN (skipped for TIME_WAIT recycled ISNs); the
	 * reference is dropped by tcp_v6_reqsk_destructor().
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
743 
744 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
745                                           struct flowi *fl,
746                                           const struct request_sock *req)
747 {
748         return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
749 }
750 
/* Request-sock operations for IPv6 TCP: SYN-ACK (re)transmission,
 * ACK/RST replies and per-request teardown.
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
760 
/* AF-specific request-sock hooks for IPv6: the MSS clamp reserves room
 * for the IPv6 header (vs IPv4), and ISN/timestamp-offset generation,
 * routing and SYN-ACK transmission use the v6 variants defined above.
 */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
777 
/* Build and transmit a bare control segment (RST or pure ACK) in reply
 * to @skb.  @sk may be NULL (e.g. RST for a segment matching no socket);
 * the reply is always emitted through the per-netns control socket
 * net->ipv6.tcp_sk.  @rst selects a RST, otherwise an ACK is sent.
 * The timestamp option is emitted only when @tsecr is non-zero; @key,
 * when non-NULL, adds an MD5 signature option.
 */
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	/* Grow the header for each option we are going to emit. */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* A RST answering a segment that itself carried ACK omits the ACK
	 * bit; every other reply sets it.
	 */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		/* NOP, NOP, TIMESTAMP option (RFC 7323 recommended layout) */
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* Sign using @skb's addresses; they are the reverse of the
		 * reply's, which tcp_v6_md5_hash_hdr() accounts for.
		 */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	/* Route the reply back to the sender of @skb. */
	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Strict (e.g. link-local) destinations must leave via the interface
	 * the segment arrived on, unless the caller pinned an oif already.
	 */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	/* Route lookup failed: drop the reply silently. */
	kfree_skb(buff);
}
882 
/* Send a RST in reply to @skb.  @sk may be NULL when the segment matched
 * no socket.  When TCP-MD5 is configured, the reply is only generated if
 * we can find (and verify against) a matching key, so that unsigned
 * spoofed segments cannot tear down a signed connection.
 */
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif = 0;

	/* Never answer a RST with a RST. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		/* NOTE(review): lookup keys on th->source for both the sport
		 * and the listener port (not th->dest), matching the comment
		 * above — confirm against upstream before changing.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, NULL, 0,
					   &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb),
					   tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		/* Only reset if the offending segment's signature verifies. */
		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	/* RFC 793: RST sequence selection depends on whether the segment
	 * being answered carried an ACK.
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
957 
/* Send a pure ACK in reply to @skb: thin wrapper around
 * tcp_v6_send_response() with rst == 0.
 */
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}
966 
/* ACK a segment received for a TIME_WAIT socket, using the state cached
 * in the timewait sock (snd_nxt/rcv_nxt, scaled window, timestamps,
 * MD5 key, tclass and flow label).  Drops the tw refcount when done.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
980 
/* ACK on behalf of a request socket (SYN_RECV / Fast Open), before a full
 * child socket exists.
 */
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
			0, 0);
}
1001 
1002 
/* If SYN cookies are compiled in, validate a non-SYN segment against the
 * cookie state; may return a newly-minted child socket, the original @sk,
 * or NULL.  Without CONFIG_SYN_COOKIES this is an identity function.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1013 
/* Handle an incoming connection request (SYN).  v4-mapped traffic is
 * delegated to the IPv4 handler; non-unicast destinations are dropped
 * without a reset.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
1029 
/* Undo tcp_v6_fill_cb(): put the IPv6 control-block data back at the
 * front of skb->cb.
 */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
1039 
/* Create the child socket completing a three-way handshake described by
 * @req.  Handles both native IPv6 and v4-mapped (ETH_P_IP) requests;
 * clones listener options, MD5 key and SYN pktoptions into the child.
 * Returns the new socket, or NULL on failure (listener drop accounted).
 */
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		/* Let IPv4 build the child, then fix up its ops so it keeps
		 * behaving as an IPv4 connection under an IPv6 socket.
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* Pointers copied wholesale from the listener above must not
		 * be shared: reset them on the child.
		 */
		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	/* Addresses come from the request sock, not from @skb. */
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	/* Dummy IPv4 addresses for an IPv6-only socket. */
	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
1247 
1248 /* The socket must have it's spinlock held when we get
1249  * here, unless it is a TCP_LISTEN socket.
1250  *
1251  * We have a potential double-lock case here, so even when
1252  * doing backlog processing we use the BH locking scheme.
1253  * This is because we cannot sleep with the original spinlock
1254  * held.
1255  */
/* Backlog/receive handler for an IPv6 TCP socket.  Returns 0 on success,
 * non-zero (from the IPv4 path) otherwise.  Also implements the Stevens
 * IPV6_PKTOPTIONS latching described below.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			/* Invalidate the cached rx dst if the interface or
			 * route validity changed.
			 */
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			/* Cookie check minted a child: hand the skb to it. */
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			/* Latch the new options; free the previous ones below. */
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
1383 
/* Populate TCP_SKB_CB() from the TCP header, relocating the IPv6 control
 * block first so both can share skb->cb.
 */
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	/* end_seq counts SYN and FIN as one sequence unit each. */
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
1407 
/* Main IPv6 TCP receive entry point (protocol handler).  Validates the
 * header and checksum, looks up the owning socket, and dispatches to the
 * request-sock, time-wait, listen or established paths.  Returns 0, or
 * -1 when the established-path handler reported an error.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* Re-read headers: pskb_may_pull() may have moved skb->data. */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		/* Operate on the listener that owns the request sock. */
		sk = req->rsk_listener;
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			/* Listener went away: drop the req and retry lookup. */
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			/* tcp_filter() may have changed skb->data. */
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false);
		}
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	/* tcp_filter() may have changed skb->data; re-read headers. */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A SYN may legitimately reopen the pair: look for a
		 * listener and, if found, retire the timewait sock and
		 * reprocess the segment against the listener.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1604 
/* Early demultiplex: before routing, try to attach an established socket
 * (and its cached rx dst, when still valid) to @skb so the later receive
 * path can skip socket lookup and route lookup.
 */
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			/* Reuse the cached dst only if it is still valid and
			 * was learned on the same interface.
			 */
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
1642 
/* TIME-WAIT socket operations for TCPv6: object size for the tw slab plus
 * the TCP-shared uniqueness check and destructor.
 */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
        .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
        .twsk_unique    = tcp_twsk_unique,
        .twsk_destructor = tcp_twsk_destructor,
};
1648 
/* Address-family-specific connection callbacks for TCP carried natively
 * over IPv6 (installed on the socket by tcp_v6_init_sock()).
 */
static const struct inet_connection_sock_af_ops ipv6_specific = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = tcp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
        .sk_rx_dst_set     = inet6_sk_rx_dst_set,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
        .net_header_len    = sizeof(struct ipv6hdr),
        .net_frag_header_len = sizeof(struct frag_hdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
        .mtu_reduced       = tcp_v6_mtu_reduced,
};
1668 
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 (RFC 2385) helpers for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
        .md5_lookup     =       tcp_v6_md5_lookup,
        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
        .md5_parse      =       tcp_v6_parse_md5_keys,
};
#endif
1676 
/*
 *      TCP over IPv4 via INET6 API
 *
 *      Used when an AF_INET6 socket talks to a v4-mapped peer: the wire
 *      traffic is IPv4 (ip_queue_xmit, tcp_v4_send_check, iphdr) while
 *      the sockopt/sockaddr interface stays IPv6.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = tcp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .sk_rx_dst_set     = inet_sk_rx_dst_set,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
        .mtu_reduced       = tcp_v4_mtu_reduced,
};
1698 
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 helpers for v4-mapped sockets: IPv4 hashing/lookup, but keys
 * are still parsed through the IPv6 sockopt path.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
        .md5_lookup     =       tcp_v4_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
        .md5_parse      =       tcp_v6_parse_md5_keys,
};
#endif
1706 
1707 /* NOTE: A lot of things set to zero explicitly by call to
1708  *       sk_alloc() so need not be done here.
1709  */
1710 static int tcp_v6_init_sock(struct sock *sk)
1711 {
1712         struct inet_connection_sock *icsk = inet_csk(sk);
1713 
1714         tcp_init_sock(sk);
1715 
1716         icsk->icsk_af_ops = &ipv6_specific;
1717 
1718 #ifdef CONFIG_TCP_MD5SIG
1719         tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1720 #endif
1721 
1722         return 0;
1723 }
1724 
/* Socket destructor: run the shared TCP teardown (tcp_v4_destroy_sock is
 * the common implementation despite its name), then release the
 * IPv6-specific socket state.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
        tcp_v4_destroy_sock(sk);
        inet6_destroy_sock(sk);
}
1730 
1731 #ifdef CONFIG_PROC_FS
1732 /* Proc filesystem TCPv6 sock list dumping. */
/* Emit one /proc/net/tcp6 line for a pending (SYN_RECV) request socket.
 * @i: sequence number printed in the "sl" column.
 */
static void get_openreq6(struct seq_file *seq,
                         const struct request_sock *req, int i)
{
        long ttd = req->rsk_timer.expires - jiffies;
        const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
        const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

        /* Timer may already have fired; clamp instead of printing a
         * negative time-to-deadline.
         */
        if (ttd < 0)
                ttd = 0;

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
                   inet_rsk(req)->ir_num,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3],
                   ntohs(inet_rsk(req)->ir_rmt_port),
                   TCP_SYN_RECV,
                   0, 0, /* could print option size, but that is af dependent. */
                   1,   /* timers active (only the expire timer) */
                   jiffies_to_clock_t(ttd),
                   req->num_timeout,
                   from_kuid_munged(seq_user_ns(seq),
                                    sock_i_uid(req->rsk_listener)),
                   0,  /* non standard timer */
                   0, /* open_requests have no inode */
                   0, req);
}
1764 
/* Emit one /proc/net/tcp6 line for a full TCP socket. Runs without the
 * socket lock, so all reads are best-effort snapshots.
 * @i: sequence number printed in the "sl" column.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
        int timer_active;
        unsigned long timer_expires;
        const struct inet_sock *inet = inet_sk(sp);
        const struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
        const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
        int rx_queue;
        int state;

        dest  = &sp->sk_v6_daddr;
        src   = &sp->sk_v6_rcv_saddr;
        destp = ntohs(inet->inet_dport);
        srcp  = ntohs(inet->inet_sport);

        /* Classify the pending timer; the numeric codes (1,4,2,0) are the
         * values userspace tools expect in the "tr" column.
         */
        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
                timer_expires   = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active    = 2;
                timer_expires   = sp->sk_timer.expires;
        } else {
                timer_active    = 0;
                timer_expires = jiffies;
        }

        /* Sample the state once so the listen check and the printed state
         * agree with each other.
         */
        state = sk_state_load(sp);
        if (state == TCP_LISTEN)
                rx_queue = sp->sk_ack_backlog;
        else
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
                 */
                rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   state,
                   tp->write_seq - tp->snd_una,
                   rx_queue,
                   timer_active,
                   jiffies_delta_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
                   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   refcount_read(&sp->sk_refcnt), sp,
                   jiffies_to_clock_t(icsk->icsk_rto),
                   jiffies_to_clock_t(icsk->icsk_ack.ato),
                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                   tp->snd_cwnd,
                   state == TCP_LISTEN ?
                        fastopenq->max_qlen :
                        (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
                   );
}
1835 
/* Emit one /proc/net/tcp6 line for a TIME-WAIT socket.
 * @i: sequence number printed in the "sl" column.
 */
static void get_timewait6_sock(struct seq_file *seq,
                               struct inet_timewait_sock *tw, int i)
{
        /* May be negative if the timer already expired; the delta variant
         * of the clock_t conversion below copes with that.
         */
        long delta = tw->tw_timer.expires - jiffies;
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;

        dest = &tw->tw_v6_daddr;
        src  = &tw->tw_v6_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
                   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                   refcount_read(&tw->tw_refcnt), tw);
}
1860 
1861 static int tcp6_seq_show(struct seq_file *seq, void *v)
1862 {
1863         struct tcp_iter_state *st;
1864         struct sock *sk = v;
1865 
1866         if (v == SEQ_START_TOKEN) {
1867                 seq_puts(seq,
1868                          "  sl  "
1869                          "local_address                         "
1870                          "remote_address                        "
1871                          "st tx_queue rx_queue tr tm->when retrnsmt"
1872                          "   uid  timeout inode\n");
1873                 goto out;
1874         }
1875         st = seq->private;
1876 
1877         if (sk->sk_state == TCP_TIME_WAIT)
1878                 get_timewait6_sock(seq, v, st->num);
1879         else if (sk->sk_state == TCP_NEW_SYN_RECV)
1880                 get_openreq6(seq, v, st->num);
1881         else
1882                 get_tcp6_sock(seq, v, st->num);
1883 out:
1884         return 0;
1885 }
1886 
/* file_operations for /proc/net/tcp6, built on the generic seq_file
 * iterator provided by the shared TCP proc code.
 */
static const struct file_operations tcp6_afinfo_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = tcp_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net
};
1894 
/* Registration record for the "tcp6" proc entry; only ->show is
 * AF-specific, the iteration callbacks come from the common TCP code.
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
        .name           = "tcp6",
        .family         = AF_INET6,
        .seq_fops       = &tcp6_afinfo_seq_fops,
        .seq_ops        = {
                .show           = tcp6_seq_show,
        },
};
1903 
/* Per-netns registration of /proc/net/tcp6. Returns 0 or a negative errno
 * from tcp_proc_register().
 */
int __net_init tcp6_proc_init(struct net *net)
{
        return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1908 
/* Per-netns removal of /proc/net/tcp6; mirrors tcp6_proc_init(). */
void tcp6_proc_exit(struct net *net)
{
        tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
1913 #endif
1914 
/* struct proto for AF_INET6 SOCK_STREAM sockets. Most operations are the
 * protocol-family-agnostic tcp_* implementations shared with IPv4; only
 * connect, init/destroy, backlog_rcv and hashing are IPv6-specific.
 * Memory accounting state (sockets_allocated, memory_*) is shared with
 * the IPv4 proto.
 */
struct proto tcpv6_prot = {
        .name                   = "TCPv6",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .connect                = tcp_v6_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v6_init_sock,
        .destroy                = tcp_v6_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .keepalive              = tcp_set_keepalive,
        .recvmsg                = tcp_recvmsg,
        .sendmsg                = tcp_sendmsg,
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v6_do_rcv,
        .release_cb             = tcp_release_cb,
        .hash                   = inet6_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .leave_memory_pressure  = tcp_leave_memory_pressure,
        .stream_memory_free     = tcp_stream_memory_free,
        .sockets_allocated      = &tcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
        .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_wmem),
        .sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_rmem),
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp6_sock),
        .slab_flags             = SLAB_TYPESAFE_BY_RCU,
        .twsk_prot              = &tcp6_timewait_sock_ops,
        .rsk_prot               = &tcp6_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
        .no_autobind            = true,
#ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
#endif
        .diag_destroy           = tcp_abort,
};
1960 
/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol tcpv6_protocol = {
        .early_demux    =       tcp_v6_early_demux,
        .early_demux_handler =  tcp_v6_early_demux,
        .handler        =       tcp_v6_rcv,
        .err_handler    =       tcp_v6_err,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1971 
/* Protocol switch entry binding SOCK_STREAM/IPPROTO_TCP on AF_INET6 to
 * tcpv6_prot and the generic inet6 stream socket ops.
 */
static struct inet_protosw tcpv6_protosw = {
        .type           =       SOCK_STREAM,
        .protocol       =       IPPROTO_TCP,
        .prot           =       &tcpv6_prot,
        .ops            =       &inet6_stream_ops,
        .flags          =       INET_PROTOSW_PERMANENT |
                                INET_PROTOSW_ICSK,
};
1980 
/* Per-netns setup: create the kernel-internal control socket stored in
 * net->ipv6.tcp_sk (presumably used by the stack to emit packets such as
 * resets without a user socket — see callers of net->ipv6.tcp_sk).
 */
static int __net_init tcpv6_net_init(struct net *net)
{
        return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
                                    SOCK_RAW, IPPROTO_TCP, net);
}
1986 
/* Per-netns teardown: destroy the control socket created in
 * tcpv6_net_init().
 */
static void __net_exit tcpv6_net_exit(struct net *net)
{
        inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
1991 
/* Batched netns exit: flush all IPv6 TIME-WAIT sockets from the global
 * TCP hash once per batch of dying namespaces.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
        inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}
1996 
/* Per-network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
        .init       = tcpv6_net_init,
        .exit       = tcpv6_net_exit,
        .exit_batch = tcpv6_net_exit_batch,
};
2002 
2003 int __init tcpv6_init(void)
2004 {
2005         int ret;
2006 
2007         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2008         if (ret)
2009                 goto out;
2010 
2011         /* register inet6 protocol */
2012         ret = inet6_register_protosw(&tcpv6_protosw);
2013         if (ret)
2014                 goto out_tcpv6_protocol;
2015 
2016         ret = register_pernet_subsys(&tcpv6_net_ops);
2017         if (ret)
2018                 goto out_tcpv6_protosw;
2019 out:
2020         return ret;
2021 
2022 out_tcpv6_protosw:
2023         inet6_unregister_protosw(&tcpv6_protosw);
2024 out_tcpv6_protocol:
2025         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2026         goto out;
2027 }
2028 
/* Module/stack teardown: undo tcpv6_init() registrations in reverse
 * order.
 */
void tcpv6_exit(void)
{
        unregister_pernet_subsys(&tcpv6_net_ops);
        inet6_unregister_protosw(&tcpv6_protosw);
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}
2035 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp