~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/ipv6/tcp_ipv6.c

Version: ~ [ linux-5.8 ] ~ [ linux-5.7.14 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.57 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.138 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.193 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.232 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.232 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.85 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *      TCP over IPv6
  3  *      Linux INET6 implementation
  4  *
  5  *      Authors:
  6  *      Pedro Roque             <roque@di.fc.ul.pt>
  7  *
  8  *      Based on:
  9  *      linux/net/ipv4/tcp.c
 10  *      linux/net/ipv4/tcp_input.c
 11  *      linux/net/ipv4/tcp_output.c
 12  *
 13  *      Fixes:
 14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 17  *                                      a single port at the same time.
 18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
 19  *
 20  *      This program is free software; you can redistribute it and/or
 21  *      modify it under the terms of the GNU General Public License
 22  *      as published by the Free Software Foundation; either version
 23  *      2 of the License, or (at your option) any later version.
 24  */
 25 
 26 #include <linux/bottom_half.h>
 27 #include <linux/module.h>
 28 #include <linux/errno.h>
 29 #include <linux/types.h>
 30 #include <linux/socket.h>
 31 #include <linux/sockios.h>
 32 #include <linux/net.h>
 33 #include <linux/jiffies.h>
 34 #include <linux/in.h>
 35 #include <linux/in6.h>
 36 #include <linux/netdevice.h>
 37 #include <linux/init.h>
 38 #include <linux/jhash.h>
 39 #include <linux/ipsec.h>
 40 #include <linux/times.h>
 41 #include <linux/slab.h>
 42 #include <linux/uaccess.h>
 43 #include <linux/ipv6.h>
 44 #include <linux/icmpv6.h>
 45 #include <linux/random.h>
 46 
 47 #include <net/tcp.h>
 48 #include <net/ndisc.h>
 49 #include <net/inet6_hashtables.h>
 50 #include <net/inet6_connection_sock.h>
 51 #include <net/ipv6.h>
 52 #include <net/transp_v6.h>
 53 #include <net/addrconf.h>
 54 #include <net/ip6_route.h>
 55 #include <net/ip6_checksum.h>
 56 #include <net/inet_ecn.h>
 57 #include <net/protocol.h>
 58 #include <net/xfrm.h>
 59 #include <net/snmp.h>
 60 #include <net/dsfield.h>
 61 #include <net/timewait_sock.h>
 62 #include <net/inet_common.h>
 63 #include <net/secure_seq.h>
 64 #include <net/tcp_memcontrol.h>
 65 #include <net/busy_poll.h>
 66 
 67 #include <linux/proc_fs.h>
 68 #include <linux/seq_file.h>
 69 
 70 #include <linux/crypto.h>
 71 #include <linux/scatterlist.h>
 72 
 73 static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
 74 static void     tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 75                                       struct request_sock *req);
 76 
 77 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 78 
 79 static const struct inet_connection_sock_af_ops ipv6_mapped;
 80 static const struct inet_connection_sock_af_ops ipv6_specific;
 81 #ifdef CONFIG_TCP_MD5SIG
 82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
 83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
 84 #else
/* CONFIG_TCP_MD5SIG disabled: stub that never finds a key, so all
 * MD5-signature paths compile away to the "no key" case.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
 90 #endif
 91 
/* Cache the skb's dst on the socket so the receive fast path can skip a
 * route lookup.  Also records the incoming ifindex and the fib6 node
 * serial number, which later serves as a validity cookie for the cache.
 */
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	/* dst_hold_safe() fails when the refcount already dropped to zero;
	 * only cache the dst if we actually obtained a reference.
	 */
	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		if (rt->rt6i_node)
			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
	}
}
105 
/* Insert a non-closed socket into the TCP hash tables.  A socket using
 * the v4-mapped af_ops is delegated to the IPv4 hash routine; a native
 * IPv6 socket goes through __inet6_hash() with bottom halves disabled.
 */
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
118 
/* Derive the initial sequence number for a connection from the flow's
 * 4-tuple.  The skb is inbound, so daddr/dest are our local address and
 * port and saddr/source are the peer's.
 */
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
126 
/* Active open (client side) of a TCP connection over IPv6.
 *
 * Validates the destination sockaddr, resolves flow labels and
 * link-local scope ids, and routes the flow.  A v4-mapped destination
 * is handed off to tcp_v4_connect() after swapping in the IPv4 af_ops
 * (rolled back on failure).  On success the socket is hashed, an ISN
 * is chosen if needed, and the SYN is sent via tcp_connect().
 *
 * Returns 0 on success or a negative errno.  On late failures the
 * socket is returned to TCP_CLOSE and its cached route is reset; in
 * all failure cases inet_dport and sk_route_caps are cleared.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	/* If flow-label sending is enabled, take the label from the
	 * sockaddr and make sure a non-zero label actually exists for
	 * this socket before using it.
	 */
	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Reconnecting to a different peer: the cached timestamp state
	 * belongs to the old peer, so drop it along with write_seq.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Swap in the IPv4 operations before delegating to
		 * tcp_v4_connect(); restore them if it fails.
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &sk->sk_v6_rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	/* The socket lock is held, so a protected dereference of np->opt
	 * suffices (no RCU read-side section needed here).
	 */
	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	/* No bound source address: adopt the one the route lookup chose. */
	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	/* Extension headers (routing header etc.) eat into the MSS. */
	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	ip6_set_txhash(sk);

	/* Choose an ISN unless one is already set (e.g. TCP repair). */
	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
326 
327 static void tcp_v6_mtu_reduced(struct sock *sk)
328 {
329         struct dst_entry *dst;
330 
331         if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
332                 return;
333 
334         dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
335         if (!dst)
336                 return;
337 
338         if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
339                 tcp_sync_mss(sk, dst_mtu(dst));
340                 tcp_simple_retransmit(sk);
341         }
342 }
343 
/* ICMPv6 error handler for TCP.
 *
 * Looks up the socket matching the offending segment quoted in the ICMP
 * payload and reacts per error type: NDISC_REDIRECT updates the cached
 * route, ICMPV6_PKT_TOOBIG triggers PMTU handling (deferred through
 * tsq_flags when the socket is owned by user context), and all other
 * errors are converted via icmpv6_err_convert() and reported to the
 * socket or its matching request_sock.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	/* Timewait sockets are not full sockets; just drop the reference. */
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* PKT_TOOBIG is still handled under user lock (deferred below);
	 * other ICMPs processed while user-owned are only counted.
	 */
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* Drop errors that arrive with a hop limit below the socket's
	 * configured minimum (anti-spoofing, IP_MINTTL equivalent).
	 */
	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	/* Ignore errors referencing sequence numbers outside our window. */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);	/* dropped when the deferred work runs */
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		/* Note : We use inet6_iif() here, not tcp_v6_iif() */
		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* Established (or otherwise connected) socket: report the error
	 * hard if allowed, soft otherwise.
	 */
	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
483 
484 
/* Build and transmit a SYN-ACK for a pending request_sock.
 *
 * @dst may be a pre-computed route; if NULL one is looked up via
 * inet6_csk_route_req().  @foc carries an optional Fast Open cookie.
 * Returns 0 on success or a negative errno (-ENOMEM if no skb/route
 * could be obtained, otherwise the net_xmit_eval()'d transmit result).
 */
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* Echo the client's flow label if flow-label reflection
		 * is enabled and we kept the original SYN's headers.
		 */
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
522 
523 
524 static void tcp_v6_reqsk_destructor(struct request_sock *req)
525 {
526         kfree_skb(inet_rsk(req)->pktopts);
527 }
528 
529 #ifdef CONFIG_TCP_MD5SIG
/* Find the MD5 key configured for the given IPv6 peer address, or NULL. */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}
535 
/* tcp_sock_af_ops hook: look up the MD5 key for addr_sk's peer address. */
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
541 
/* Request-sock hook: look up the MD5 key for the request's remote address. */
static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}
547 
/* Handle the TCP_MD5SIG setsockopt: copy a struct tcp_md5sig from user
 * space and add or (when tcpm_keylen == 0) delete the key for the given
 * peer.  A v4-mapped IPv6 address is stored under AF_INET using its low
 * 32 bits so it matches v4-mapped connections.
 *
 * Returns 0 on success, -EINVAL on a short optlen / wrong family /
 * oversized key, -EFAULT if the user copy fails, or the error from
 * tcp_md5_do_add()/tcp_md5_do_del().
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	/* Zero key length means "delete the key for this peer". */
	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
581 
/* Fold the IPv6 TCP pseudo-header (saddr, daddr, protocol, length; see
 * RFC 2460) into the MD5 hash in progress on @hp.  @nbytes is the TCP
 * segment length covered by the signature.  Returns the
 * crypto_hash_update() result (0 on success).
 */
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
599 
/* Compute the TCP MD5 signature over pseudo-header + TCP header + key
 * for a header-only segment (used for RST/ACK replies).  Writes the
 * 16-byte digest into @md5_hash.  Returns 0 on success; on any failure
 * zeroes @md5_hash and returns 1.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	/* th->doff is in 32-bit words; << 2 converts to bytes. */
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
632 
/* Compute the TCP MD5 signature over a full segment: pseudo-header,
 * TCP header, payload, then the key.  Addresses come from @sk, @req or
 * the skb's IPv6 header, in that order of preference.  Writes the
 * 16-byte digest into @md5_hash; returns 0 on success, else zeroes the
 * digest and returns 1.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	/* Hash the payload, skipping the header (th->doff words -> bytes). */
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
683 
/* Verify the MD5 signature of an inbound segment against the key
 * configured for the peer.  Returns 0 if the segment is acceptable
 * (no key expected and none present, or signature matches) and 1 if it
 * must be dropped (key/option mismatch or bad signature).
 */
static int __tcp_v6_inbound_md5_hash(struct sock *sk,
				     const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	/* Key configured but segment unsigned: drop. */
	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	/* Segment signed but no key configured: drop. */
	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
725 
/* RCU-protected wrapper around __tcp_v6_inbound_md5_hash(): the key
 * lookup inside must run under rcu_read_lock().  Returns 1 when the
 * segment must be dropped, 0 otherwise.
 */
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	int verdict;

	rcu_read_lock();
	verdict = __tcp_v6_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return verdict;
}
736 
737 #endif
738 
/* Initialise the IPv6-specific fields of a freshly minted request_sock
 * from the incoming SYN: addresses, interface index, and (when any
 * receive option requires the original headers) a reference to the SYN
 * skb kept in ireq->pktopts (released by tcp_v6_reqsk_destructor()).
 */
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Keep the SYN skb if any rx option needs its headers later;
	 * atomic_inc(&skb->users) takes an extra reference on it.
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
764 
/* route_req hook: route the request's flow via inet6_csk_route_req().
 * Always reports strict checking (when the caller asks), i.e. the
 * resulting route is tied to this exact request.
 */
static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}
773 
/* Generic request_sock operations for IPv6 TCP (allocation size,
 * SYN-ACK retransmit, ACK/RST emission and destruction callbacks).
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
783 
/* TCP-specific request_sock callbacks for IPv6: MSS clamp assuming the
 * minimum IPv6 MTU, MD5 hooks, request init/routing, ISN generation,
 * SYN-ACK transmission and accept-queue hashing.
 */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};
800 
/* Build and transmit a stand-alone TCP segment (a RST when @rst != 0,
 * otherwise a bare ACK) in reply to the received segment @skb.  The reply
 * swaps @skb's addresses/ports, carries (@seq, @ack, @win) and optionally a
 * timestamp option (when @tsecr != 0) and a TCP-MD5 signature (when @key is
 * set).  It is sent through the per-netns control socket, so it also works
 * when @sk is NULL (e.g. a RST for a connection we know nothing about).
 */
static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
                                 u32 ack, u32 win, u32 tsval, u32 tsecr,
                                 int oif, struct tcp_md5sig_key *key, int rst,
                                 u8 tclass, u32 label)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct tcphdr *t1;
        struct sk_buff *buff;
        struct flowi6 fl6;
        struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
        struct sock *ctl_sk = net->ipv6.tcp_sk;
        unsigned int tot_len = sizeof(struct tcphdr);
        struct dst_entry *dst;
        __be32 *topt;

        /* Account for the optional TCP options before allocating. */
        if (tsecr)
                tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
        if (key)
                tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
                         GFP_ATOMIC);
        if (buff == NULL)
                return;

        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

        t1 = (struct tcphdr *) skb_push(buff, tot_len);
        skb_reset_transport_header(buff);

        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = tot_len / 4;
        t1->seq = htonl(seq);
        t1->ack_seq = htonl(ack);
        /* A RST answering an ACK must not itself carry the ACK bit. */
        t1->ack = !rst || !th->ack;
        t1->rst = rst;
        t1->window = htons(win);

        topt = (__be32 *)(t1 + 1);

        if (tsecr) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
                *topt++ = htonl(tsval);
                *topt++ = htonl(tsecr);
        }

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
                /* Sign the reply header; addresses come from the incoming
                 * packet's IPv6 header.
                 */
                tcp_v6_md5_hash_hdr((__u8 *)topt, key,
                                    &ipv6_hdr(skb)->saddr,
                                    &ipv6_hdr(skb)->daddr, t1);
        }
#endif

        /* Flow back to the sender: our reply's dst is their src. */
        memset(&fl6, 0, sizeof(fl6));
        fl6.daddr = ipv6_hdr(skb)->saddr;
        fl6.saddr = ipv6_hdr(skb)->daddr;
        fl6.flowlabel = label;

        buff->ip_summed = CHECKSUM_PARTIAL;
        buff->csum = 0;

        __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

        fl6.flowi6_proto = IPPROTO_TCP;
        /* Link-local (strict) destinations need an explicit interface. */
        if (rt6_need_strict(&fl6.daddr) && !oif)
                fl6.flowi6_oif = tcp_v6_iif(skb);
        else
                fl6.flowi6_oif = oif;
        fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

        /* Pass the control socket to ip6_dst_lookup_flow() even for a RST;
         * the underlying lookup only uses it to retrieve the network
         * namespace.
         */
        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(buff, dst);
                ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
                TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
                if (rst)
                        TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
                return;
        }

        /* No route: the reply is silently dropped. */
        kfree_skb(buff);
}
899 
/* Send a RST in reply to @skb.  @sk may be NULL when no matching socket was
 * found; with MD5 enabled a listener is then looked up so a properly signed
 * RST can still be emitted.
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        u32 seq = 0, ack_seq = 0;
        struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
        const __u8 *hash_location = NULL;
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        unsigned char newhash[16];
        int genhash;
        struct sock *sk1 = NULL;
#endif
        int oif;

        /* Never answer a RST with a RST. */
        if (th->rst)
                return;

        /* If sk not NULL, it means we did a successful lookup and incoming
         * route had to be correct. prequeue might have dropped our dst.
         */
        if (!sk && !ipv6_unicast_destination(skb))
                return;

#ifdef CONFIG_TCP_MD5SIG
        hash_location = tcp_parse_md5sig_option(th);
        if (!sk && hash_location) {
                /* Active side is lost.  Try to find a listening socket
                 * through the segment's source port, then find the MD5 key
                 * via that socket.  We are not losing security here: the
                 * incoming packet is verified against the key we find, and
                 * no RST is generated if the MD5 hash doesn't match.
                 */
                sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
                                           &tcp_hashinfo, &ipv6h->saddr,
                                           th->source, &ipv6h->daddr,
                                           ntohs(th->source), tcp_v6_iif(skb));
                if (!sk1)
                        return;

                /* The md5 key is RCU-protected; hold it until we're done. */
                rcu_read_lock();
                key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
                if (!key)
                        goto release_sk1;

                genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto release_sk1;
        } else {
                key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
        }
#endif

        /* Echo the peer's ACK as our SEQ if present; otherwise ACK every
         * sequence number the offending segment occupied.
         */
        if (th->ack)
                seq = ntohl(th->ack_seq);
        else
                ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
                          (th->doff << 2);

        oif = sk ? sk->sk_bound_dev_if : 0;
        tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
        /* Drop the reference and RCU lock taken for the listener lookup. */
        if (sk1) {
                rcu_read_unlock();
                sock_put(sk1);
        }
#endif
}
970 
971 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
972                             u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
973                             struct tcp_md5sig_key *key, u8 tclass,
974                             u32 label)
975 {
976         tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
977                              tclass, label);
978 }
979 
/* Answer a segment that hit a TIME_WAIT socket with the appropriate ACK,
 * then drop the timewait reference the caller handed us.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        /* Sequence/window state is taken from the timewait mini-socket. */
        tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
                        tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

        inet_twsk_put(tw);
}
993 
/* ACK a segment on behalf of a connection that is still a request socket. */
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
        tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
                        tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
                        tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
                        0, 0);
}
1007 
1008 
/* Resolve a segment arriving on listener @sk to the socket that should
 * process it: a pending request (SYN_RECV), an already-established child,
 * a SYN-cookie-recovered child, or the listener itself.  Returns NULL when
 * the segment should be discarded.
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        struct request_sock *req, **prev;
        const struct tcphdr *th = tcp_hdr(skb);
        struct sock *nsk;

        /* Find possible connection requests. */
        req = inet6_csk_search_req(sk, &prev, th->source,
                                   &ipv6_hdr(skb)->saddr,
                                   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
        if (req)
                return tcp_check_req(sk, skb, req, prev, false);

        /* Maybe the handshake already completed on another CPU. */
        nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
                                         &ipv6_hdr(skb)->saddr, th->source,
                                         &ipv6_hdr(skb)->daddr, ntohs(th->dest),
                                         tcp_v6_iif(skb));

        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        /* Caller expects the returned child locked. */
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }

#ifdef CONFIG_SYN_COOKIES
        /* A non-SYN segment with no matching request may carry a cookie. */
        if (!th->syn)
                sk = cookie_v6_check(sk, skb);
#endif
        return sk;
}
1042 
1043 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1044 {
1045         if (skb->protocol == htons(ETH_P_IP))
1046                 return tcp_v4_conn_request(sk, skb);
1047 
1048         if (!ipv6_unicast_destination(skb))
1049                 goto drop;
1050 
1051         return tcp_conn_request(&tcp6_request_sock_ops,
1052                                 &tcp_request_sock_ipv6_ops, sk, skb);
1053 
1054 drop:
1055         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1056         return 0; /* don't send reset */
1057 }
1058 
/* Undo tcp_v6_fill_cb(): put the saved IPv6 control-block data back at the
 * front of skb->cb.
 */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
        /* We need to move header back to the beginning if xfrm6_policy_check()
         * and tcp_v6_fill_cb() are going to be called again.
         * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
         */
        memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
                sizeof(struct inet6_skb_parm));
}
1068 
/* Create the child socket for a completed IPv6 (or v4-mapped) handshake.
 * Called on behalf of listener @sk with @req carrying the handshake state.
 * Returns the new socket, or NULL on failure (full accept queue, routing
 * or memory trouble).
 */
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst)
{
        struct inet_request_sock *ireq;
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct tcp6_sock *newtcp6sk;
        struct ipv6_txoptions *opt;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif
        struct flowi6 fl6;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped: an IPv4 connection accepted on an AF_INET6
                 *      listener.  Let the IPv4 code build the child, then
                 *      graft the IPv6 bookkeeping onto it.
                 */

                newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

                if (newsk == NULL)
                        return NULL;

                newtcp6sk = (struct tcp6_sock *)newsk;
                inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

                newinet = inet_sk(newsk);
                newnp = inet6_sk(newsk);
                newtp = tcp_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                /* Expose the IPv4 addresses in ::ffff:a.b.c.d form. */
                ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

                newsk->sk_v6_rcv_saddr = newnp->saddr;

                /* From here on the child behaves as an IPv4 socket. */
                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                /* Do not inherit per-socket IPv6 state from the listener
                 * (the memcpy above copied the listener's pointers).
                 */
                newnp->ipv6_mc_list = NULL;
                newnp->ipv6_ac_list = NULL;
                newnp->ipv6_fl_list = NULL;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = inet_iif(skb);
                newnp->mcast_hops  = ip_hdr(skb)->ttl;
                newnp->rcv_flowinfo = 0;
                if (np->repflow)
                        newnp->flow_label = 0;

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, tcp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* It is tricky place. Until this moment IPv4 tcp
                   worked with IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        ireq = inet_rsk(req);

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (!dst) {
                dst = inet6_csk_route_req(sk, &fl6, req);
                if (!dst)
                        goto out;
        }

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out_nonewsk;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, tcp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        newsk->sk_gso_type = SKB_GSO_TCPV6;
        __ip6_dst_store(newsk, dst, NULL, NULL);
        inet6_sk_rx_dst_set(newsk, skb);

        newtcp6sk = (struct tcp6_sock *)newsk;
        inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

        newtp = tcp_sk(newsk);
        newinet = inet_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        /* Addressing comes from the request socket, not the listener. */
        newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
        newnp->saddr = ireq->ir_v6_loc_addr;
        newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if = ireq->ir_iif;

        ip6_set_txhash(newsk);

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->inet_opt = NULL;
        newnp->ipv6_mc_list = NULL;
        newnp->ipv6_ac_list = NULL;
        newnp->ipv6_fl_list = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN; ownership of ireq->pktopts
         * is consumed here.
         */
        newnp->pktoptions = NULL;
        if (ireq->pktopts != NULL) {
                newnp->pktoptions = skb_clone(ireq->pktopts,
                                              sk_gfp_atomic(sk, GFP_ATOMIC));
                consume_skb(ireq->pktopts);
                ireq->pktopts = NULL;
                if (newnp->pktoptions) {
                        tcp_v6_restore_cb(newnp->pktoptions);
                        skb_set_owner_r(newnp->pktoptions, newsk);
                }
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = tcp_v6_iif(skb);
        newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
        newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
        if (np->repflow)
                newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

        /* Clone native IPv6 options from listening socket (if any)

           Yes, keeping reference count would be much more clever,
           but we make one more one thing there: reattach optmem
           to newsk.
         */
        opt = rcu_dereference(np->opt);
        if (opt) {
                opt = ipv6_dup_options(newsk, opt);
                RCU_INIT_POINTER(newnp->opt, opt);
        }
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (opt)
                inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
                                                    opt->opt_flen;

        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
        /* A user-supplied MSS caps the advertised one. */
        if (tcp_sk(sk)->rx_opt.user_mss &&
            tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

        tcp_initialize_rcv_mss(newsk);

        /* IPv4 address fields get the LOOPBACK4_IPV6 placeholder value. */
        newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
        newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
        if (key != NULL) {
                /* We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
                               AF_INET6, key->key, key->keylen,
                               sk_gfp_atomic(sk, GFP_ATOMIC));
        }
#endif

        if (__inet_inherit_port(sk, newsk) < 0) {
                inet_csk_prepare_forced_close(newsk);
                tcp_done(newsk);
                goto out;
        }
        __inet6_hash(newsk, NULL);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
        dst_release(dst);
out:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
}
1273 
1274 /* The socket must have it's spinlock held when we get
1275  * here.
1276  *
1277  * We have a potential double-lock case here, so even when
1278  * doing backlog processing we use the BH locking scheme.
1279  * This is because we cannot sleep with the original spinlock
1280  * held.
1281  */
/* Backlog/receive work for one segment on an IPv6 TCP socket; the caller
 * holds the socket spinlock (see the comment above).  Always returns 0 and
 * always consumes @skb (queued, freed, or answered with a reset).
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp;
        struct sk_buff *opt_skb = NULL;

        /* Imagine: socket is IPv6. IPv4 packet arrives,
           goes to IPv4 receive handler and backlogged.
           From backlog it always goes here. Kerboom...
           Fortunately, tcp_rcv_established and rcv_established
           handle them correctly, but it is not case with
           tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_do_rcv(sk, skb);

        if (sk_filter(sk, skb))
                goto discard;

        /*
         *      socket locking is here for SMP purposes as backlog rcv
         *      is currently called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code, where we
           may make it not affecting IPv4.
           The rest of code is protocol independent,
           and I do not like idea to uglify IPv4.

           Actually, all the idea behind IPV6_PKTOPTIONS
           looks not very well thought. For now we latch
           options, received in the last packet, enqueued
           by tcp. Feel free to propose better solution.
                                               --ANK (980728)
         */
        if (np->rxopt.all)
                opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                struct dst_entry *dst = sk->sk_rx_dst;

                sock_rps_save_rxhash(sk, skb);
                /* Invalidate a cached rx dst that no longer matches the
                 * arriving interface or failed its validity check.
                 */
                if (dst) {
                        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
                            dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
                                dst_release(dst);
                                sk->sk_rx_dst = NULL;
                        }
                }

                tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
        }

        if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v6_hnd_req(sk, skb);
                if (!nsk)
                        goto discard;

                /*
                 * Queue it on the new socket if the new socket is active,
                 * otherwise we just shortcircuit this and continue with
                 * the new socket..
                 */
                if (nsk != sk) {
                        sock_rps_save_rxhash(nsk, skb);
                        if (tcp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        } else
                sock_rps_save_rxhash(sk, skb);

        if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
                goto reset;
        if (opt_skb)
                goto ipv6_pktoptions;
        return 0;

reset:
        tcp_v6_send_reset(sk, skb);
discard:
        if (opt_skb)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
csum_err:
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
        goto discard;


ipv6_pktoptions:
        /* Do you ask, what is it?

           1. skb was enqueued by tcp.
           2. skb is added to tail of read queue, rather than out of order.
           3. socket is not in passive state.
           4. Finally, it really contains options, which user wants to receive.
         */
        tp = tcp_sk(sk);
        if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
            !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
                if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
                        np->mcast_oif = tcp_v6_iif(opt_skb);
                if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
                        np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
                if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
                        np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
                if (np->repflow)
                        np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
                if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
                        /* Latch the clone as np->pktoptions; any previous
                         * one comes back via xchg() and is freed below.
                         */
                        skb_set_owner_r(opt_skb, sk);
                        tcp_v6_restore_cb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, opt_skb);
                } else {
                        __kfree_skb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, NULL);
                }
        }

        kfree_skb(opt_skb);
        return 0;
}
1416 
1417 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1418                            const struct tcphdr *th)
1419 {
1420         /* This is tricky: we move IP6CB at its correct location into
1421          * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1422          * _decode_session6() uses IP6CB().
1423          * barrier() makes sure compiler won't play aliasing games.
1424          */
1425         memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1426                 sizeof(struct inet6_skb_parm));
1427         barrier();
1428 
1429         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1430         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1431                                     skb->len - th->doff*4);
1432         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1433         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1434         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1435         TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1436         TCP_SKB_CB(skb)->sacked = 0;
1437 }
1438 
/* Main IPv6 TCP input routine, invoked by the IPv6 stack for every TCP
 * segment.  Validates header and checksum, finds the owning socket and
 * either processes the segment, queues it (prequeue/backlog), answers with
 * a RST, or runs the TIME_WAIT state machine.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
        const struct tcphdr *th;
        const struct ipv6hdr *hdr;
        struct sock *sk;
        int ret;
        struct net *net = dev_net(skb->dev);

        if (skb->pkt_type != PACKET_HOST)
                goto discard_it;

        /*
         *      Count it even if it's bad.
         */
        TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;

        th = tcp_hdr(skb);

        /* A data offset smaller than the fixed header is malformed. */
        if (th->doff < sizeof(struct tcphdr)/4)
                goto bad_packet;
        if (!pskb_may_pull(skb, th->doff*4))
                goto discard_it;

        if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
                goto csum_error;

        /* Reload header pointers: the pulls above may have moved data. */
        th = tcp_hdr(skb);
        hdr = ipv6_hdr(skb);

        sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
                                inet6_iif(skb));
        if (!sk)
                goto no_tcp_socket;

process:
        if (sk->sk_state == TCP_TIME_WAIT)
                goto do_time_wait;

        /* Drop packets below the socket's minimum hop count
         * (IPV6_MINHOPCOUNT).
         */
        if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
                goto discard_and_relse;
        }

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
        if (tcp_v6_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
#endif

        if (sk_filter(sk, skb))
                goto discard_and_relse;

        sk_mark_napi_id(sk, skb);
        skb->dev = NULL;

        bh_lock_sock_nested(sk);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
                /* Socket unowned: process now unless prequeued. */
                if (!tcp_prequeue(sk, skb))
                        ret = tcp_v6_do_rcv(sk, skb);
        } else if (unlikely(sk_add_backlog(sk, skb,
                                           sk->sk_rcvbuf + sk->sk_sndbuf))) {
                /* Owned by user and backlog full: drop and count it. */
                bh_unlock_sock(sk);
                NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
        }
        bh_unlock_sock(sk);

        sock_put(sk);
        return ret ? -1 : 0;

no_tcp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;

        tcp_v6_fill_cb(skb, hdr, th);

        if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
                TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
                TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
        } else {
                /* Well-formed segment for no known connection: reset it. */
                tcp_v6_send_reset(NULL, skb);
        }

discard_it:
        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;

do_time_wait:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }

        tcp_v6_fill_cb(skb, hdr, th);

        if (skb->len < (th->doff<<2)) {
                inet_twsk_put(inet_twsk(sk));
                goto bad_packet;
        }
        if (tcp_checksum_complete(skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto csum_error;
        }

        switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN:
        {
                /* A new SYN may reuse the TIME_WAIT four-tuple: retire the
                 * timewait socket and restart processing on a listener.
                 */
                struct sock *sk2;

                sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
                                            &ipv6_hdr(skb)->saddr, th->source,
                                            &ipv6_hdr(skb)->daddr,
                                            ntohs(th->dest), tcp_v6_iif(skb));
                if (sk2 != NULL) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
                        inet_twsk_deschedule(tw, &tcp_death_row);
                        inet_twsk_put(tw);
                        sk = sk2;
                        tcp_v6_restore_cb(skb);
                        goto process;
                }
                /* Fall through to ACK */
        }
        case TCP_TW_ACK:
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
                tcp_v6_restore_cb(skb);
                goto no_tcp_socket;
        case TCP_TW_SUCCESS:
                ;
        }
        goto discard_it;
}
1587 
/* Early demux: before routing, try to match the packet to an established
 * socket and reuse that socket's cached rx dst, saving a route lookup.
 */
static void tcp_v6_early_demux(struct sk_buff *skb)
{
        const struct ipv6hdr *hdr;
        const struct tcphdr *th;
        struct sock *sk;

        if (skb->pkt_type != PACKET_HOST)
                return;

        /* The fixed TCP header must be mapped before we touch its fields. */
        if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
                return;

        hdr = ipv6_hdr(skb);
        th = tcp_hdr(skb);

        if (th->doff < sizeof(struct tcphdr) / 4)
                return;

        /* Note : We use inet6_iif() here, not tcp_v6_iif() */
        sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
                                        &hdr->saddr, th->source,
                                        &hdr->daddr, ntohs(th->dest),
                                        inet6_iif(skb));
        if (sk) {
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk->sk_state != TCP_TIME_WAIT) {
                        struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

                        if (dst)
                                dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
                        /* Only attach the dst if it is still valid and for
                         * the interface this packet arrived on.
                         */
                        if (dst &&
                            inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
                                skb_dst_set_noref(skb, dst);
                }
        }
}
1625 
/* Timewait-socket hooks for IPv6 TCP sockets. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
        .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
        .twsk_unique    = tcp_twsk_unique,
        .twsk_destructor = tcp_twsk_destructor,
};
1631 
/* Address-family operations for native (non-mapped) IPv6 TCP sockets. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = tcp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
        .sk_rx_dst_set     = inet6_sk_rx_dst_set,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
        .net_header_len    = sizeof(struct ipv6hdr),
        .net_frag_header_len = sizeof(struct frag_hdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
        .bind_conflict     = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
        .mtu_reduced       = tcp_v6_mtu_reduced,
};
1652 
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 (RFC 2385) operations for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1660 
/*
 *	TCP over IPv4 via INET6 API
 *
 *	Used for an AF_INET6 socket that ends up talking to an
 *	IPv4-mapped peer: transmit-side ops come from IPv4, while
 *	sockaddr handling stays IPv6.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
1683 
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 operations for IPv4-mapped IPv6 sockets: hash over the IPv4
 * addresses, but key parsing still goes through the IPv6 sockopt path.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1691 
/* NOTE: A lot of things set to zero explicitly by call to
 *	 sk_alloc() so need not be done here.
 */
/* .init hook of tcpv6_prot: generic TCP socket init plus the
 * IPv6-specific af_ops (and MD5 ops when configured).
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
1709 
/* .destroy hook of tcpv6_prot: run the shared TCP teardown first,
 * then release IPv6-only socket state.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1715 
1716 #ifdef CONFIG_PROC_FS
1717 /* Proc filesystem TCPv6 sock list dumping. */
/* Dump one SYN_RECV open request as a /proc/net/tcp6 line.
 * The column layout mirrors get_tcp6_sock(); fields that have no
 * meaning for a request socket are printed as constants.
 */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	/* Time-to-die of the request, clamped at 0 below. */
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1748 
1749 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1750 {
1751         const struct in6_addr *dest, *src;
1752         __u16 destp, srcp;
1753         int timer_active;
1754         unsigned long timer_expires;
1755         const struct inet_sock *inet = inet_sk(sp);
1756         const struct tcp_sock *tp = tcp_sk(sp);
1757         const struct inet_connection_sock *icsk = inet_csk(sp);
1758         struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1759 
1760         dest  = &sp->sk_v6_daddr;
1761         src   = &sp->sk_v6_rcv_saddr;
1762         destp = ntohs(inet->inet_dport);
1763         srcp  = ntohs(inet->inet_sport);
1764 
1765         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1766             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1767             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1768                 timer_active    = 1;
1769                 timer_expires   = icsk->icsk_timeout;
1770         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1771                 timer_active    = 4;
1772                 timer_expires   = icsk->icsk_timeout;
1773         } else if (timer_pending(&sp->sk_timer)) {
1774                 timer_active    = 2;
1775                 timer_expires   = sp->sk_timer.expires;
1776         } else {
1777                 timer_active    = 0;
1778                 timer_expires = jiffies;
1779         }
1780 
1781         seq_printf(seq,
1782                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1783                    "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1784                    i,
1785                    src->s6_addr32[0], src->s6_addr32[1],
1786                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1787                    dest->s6_addr32[0], dest->s6_addr32[1],
1788                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1789                    sp->sk_state,
1790                    tp->write_seq-tp->snd_una,
1791                    (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1792                    timer_active,
1793                    jiffies_delta_to_clock_t(timer_expires - jiffies),
1794                    icsk->icsk_retransmits,
1795                    from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1796                    icsk->icsk_probes_out,
1797                    sock_i_ino(sp),
1798                    atomic_read(&sp->sk_refcnt), sp,
1799                    jiffies_to_clock_t(icsk->icsk_rto),
1800                    jiffies_to_clock_t(icsk->icsk_ack.ato),
1801                    (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1802                    tp->snd_cwnd,
1803                    sp->sk_state == TCP_LISTEN ?
1804                         (fastopenq ? fastopenq->max_qlen : 0) :
1805                         (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1806                    );
1807 }
1808 
/* Dump one TIME_WAIT minisock as a /proc/net/tcp6 line.
 * Queue sizes, uid and inode do not exist for a timewait socket and
 * are printed as zeros; timer code 3 marks the timewait timer.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	/* Remaining time until the timewait timer fires (may be negative). */
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
1833 
1834 static int tcp6_seq_show(struct seq_file *seq, void *v)
1835 {
1836         struct tcp_iter_state *st;
1837         struct sock *sk = v;
1838 
1839         if (v == SEQ_START_TOKEN) {
1840                 seq_puts(seq,
1841                          "  sl  "
1842                          "local_address                         "
1843                          "remote_address                        "
1844                          "st tx_queue rx_queue tr tm->when retrnsmt"
1845                          "   uid  timeout inode\n");
1846                 goto out;
1847         }
1848         st = seq->private;
1849 
1850         switch (st->state) {
1851         case TCP_SEQ_STATE_LISTENING:
1852         case TCP_SEQ_STATE_ESTABLISHED:
1853                 if (sk->sk_state == TCP_TIME_WAIT)
1854                         get_timewait6_sock(seq, v, st->num);
1855                 else
1856                         get_tcp6_sock(seq, v, st->num);
1857                 break;
1858         case TCP_SEQ_STATE_OPENREQ:
1859                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1860                 break;
1861         }
1862 out:
1863         return 0;
1864 }
1865 
/* File operations for the /proc/net/tcp6 seq_file. */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
1873 
/* AF_INET6 descriptor for the shared TCP proc iterator; only .show
 * is AF-specific, the iteration itself is provided by tcp_proc_register().
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
1882 
/* Register /proc/net/tcp6 for a network namespace. */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1887 
/* Remove /proc/net/tcp6 from a network namespace. */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
1892 #endif
1893 
/* .clear_sk hook: zero a recycled socket object, except the pinet6
 * pointer, which concurrent RCU lookups may still dereference
 * (the slab cache uses SLAB_DESTROY_BY_RCU).
 */
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	/* Clear everything that follows pinet6 up to the object size. */
	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
1904 
/* struct proto for AF_INET6/SOCK_STREAM sockets; mostly shared TCP
 * helpers with the v6-specific init/destroy/backlog/hash hooks.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	/* RCU-freed slab: see tcp_v6_clear_sk() for why pinet6 survives. */
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
1951 
/* IPv6 transport protocol hooks for IPPROTO_TCP. */
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1958 
/* socket() switch entry mapping AF_INET6/SOCK_STREAM to tcpv6_prot. */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
1967 
/* Per-netns init: create the kernel control socket used to send
 * resets/ACKs on behalf of this namespace.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
1973 
/* Per-netns exit: destroy the namespace's TCP control socket. */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
1978 
/* Batched exit: purge IPv6 timewait sockets for all namespaces being
 * torn down in one pass.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
1983 
/* Per-network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
1989 
1990 int __init tcpv6_init(void)
1991 {
1992         int ret;
1993 
1994         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1995         if (ret)
1996                 goto out;
1997 
1998         /* register inet6 protocol */
1999         ret = inet6_register_protosw(&tcpv6_protosw);
2000         if (ret)
2001                 goto out_tcpv6_protocol;
2002 
2003         ret = register_pernet_subsys(&tcpv6_net_ops);
2004         if (ret)
2005                 goto out_tcpv6_protosw;
2006 out:
2007         return ret;
2008 
2009 out_tcpv6_protosw:
2010         inet6_unregister_protosw(&tcpv6_protosw);
2011 out_tcpv6_protocol:
2012         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2013         goto out;
2014 }
2015 
/* Module/teardown counterpart of tcpv6_init(): unregister in reverse
 * registration order.
 */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}
2022 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp