~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/ipv6/tcp_ipv6.c

Version: ~ [ linux-6.1-rc7 ] ~ [ linux-6.0.10 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.80 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.156 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.225 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.267 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.300 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.334 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.302 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *      TCP over IPv6
  3  *      Linux INET6 implementation
  4  *
  5  *      Authors:
  6  *      Pedro Roque             <roque@di.fc.ul.pt>
  7  *
  8  *      Based on:
  9  *      linux/net/ipv4/tcp.c
 10  *      linux/net/ipv4/tcp_input.c
 11  *      linux/net/ipv4/tcp_output.c
 12  *
 13  *      Fixes:
 14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 17  *                                      a single port at the same time.
 18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
 19  *
 20  *      This program is free software; you can redistribute it and/or
 21  *      modify it under the terms of the GNU General Public License
 22  *      as published by the Free Software Foundation; either version
 23  *      2 of the License, or (at your option) any later version.
 24  */
 25 
 26 #include <linux/bottom_half.h>
 27 #include <linux/module.h>
 28 #include <linux/errno.h>
 29 #include <linux/types.h>
 30 #include <linux/socket.h>
 31 #include <linux/sockios.h>
 32 #include <linux/net.h>
 33 #include <linux/jiffies.h>
 34 #include <linux/in.h>
 35 #include <linux/in6.h>
 36 #include <linux/netdevice.h>
 37 #include <linux/init.h>
 38 #include <linux/jhash.h>
 39 #include <linux/ipsec.h>
 40 #include <linux/times.h>
 41 #include <linux/slab.h>
 42 #include <linux/uaccess.h>
 43 #include <linux/ipv6.h>
 44 #include <linux/icmpv6.h>
 45 #include <linux/random.h>
 46 
 47 #include <net/tcp.h>
 48 #include <net/ndisc.h>
 49 #include <net/inet6_hashtables.h>
 50 #include <net/inet6_connection_sock.h>
 51 #include <net/ipv6.h>
 52 #include <net/transp_v6.h>
 53 #include <net/addrconf.h>
 54 #include <net/ip6_route.h>
 55 #include <net/ip6_checksum.h>
 56 #include <net/inet_ecn.h>
 57 #include <net/protocol.h>
 58 #include <net/xfrm.h>
 59 #include <net/snmp.h>
 60 #include <net/dsfield.h>
 61 #include <net/timewait_sock.h>
 62 #include <net/inet_common.h>
 63 #include <net/secure_seq.h>
 64 #include <net/busy_poll.h>
 65 
 66 #include <linux/proc_fs.h>
 67 #include <linux/seq_file.h>
 68 
 69 #include <crypto/hash.h>
 70 #include <linux/scatterlist.h>
 71 
 72 static void     tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
 73 static void     tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 74                                       struct request_sock *req);
 75 
 76 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 77 
 78 static const struct inet_connection_sock_af_ops ipv6_mapped;
 79 static const struct inet_connection_sock_af_ops ipv6_specific;
 80 #ifdef CONFIG_TCP_MD5SIG
 81 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
 82 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
 83 #else
/* Stub for !CONFIG_TCP_MD5SIG builds: no MD5 keys can ever be configured,
 * so every lookup misses.  Lets callers stay #ifdef-free.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
                                                   const struct in6_addr *addr)
{
        return NULL;
}
 89 #endif
 90 
 91 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 92 {
 93         struct dst_entry *dst = skb_dst(skb);
 94 
 95         if (dst && dst_hold_safe(dst)) {
 96                 const struct rt6_info *rt = (const struct rt6_info *)dst;
 97 
 98                 sk->sk_rx_dst = dst;
 99                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
100                 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
101         }
102 }
103 
104 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
105 {
106         return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
107                                 ipv6_hdr(skb)->saddr.s6_addr32,
108                                 tcp_hdr(skb)->dest,
109                                 tcp_hdr(skb)->source);
110 }
111 
112 static u32 tcp_v6_init_ts_off(const struct sk_buff *skb)
113 {
114         return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
115                                    ipv6_hdr(skb)->saddr.s6_addr32);
116 }
117 
/* Active open (connect()) for a TCP/IPv6 socket.
 *
 * Validates the destination, handles the v4-mapped case by delegating to
 * tcp_v4_connect(), otherwise performs the IPv6 route lookup, binds the
 * local address, hashes the socket and sends the SYN.
 *
 * Returns 0 on success or a negative errno; on failure the destination
 * port and route caps are cleared (see the failure labels).
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	/* Honour a caller-supplied flow label when IPV6_FLOWINFO_SEND is
	 * enabled; a non-zero label must refer to an existing flow.
	 */
	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Reconnecting to a different peer: discard stale PAWS/timestamp
	 * state and restart the sequence space.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Flip the socket to the v4-mapped operations before
		 * delegating to the IPv4 connect path.
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* v4 connect failed: restore the IPv6 ops (and the
			 * saved ext-header length) before reporting.
			 */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	/* Build the flow description for the route lookup. */
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		/* No local address bound yet: adopt the one routing chose. */
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row->sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	/* Extension headers (from IPV6_*OPTS) shrink the usable MSS. */
	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	/* Repair mode restores write_seq/tsoffset itself. */
	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	/* TCP Fast Open may defer the actual SYN until sendmsg(). */
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
322 
323 static void tcp_v6_mtu_reduced(struct sock *sk)
324 {
325         struct dst_entry *dst;
326 
327         if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
328                 return;
329 
330         dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
331         if (!dst)
332                 return;
333 
334         if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
335                 tcp_sync_mss(sk, dst_mtu(dst));
336                 tcp_simple_retransmit(sk);
337         }
338 }
339 
/* ICMPv6 error handler for TCP: delivers errors (unreachable, PMTU,
 * redirects, ...) that arrived in response to one of our segments to
 * the owning socket.
 *
 * @skb:    the ICMPv6 packet; skb->data points at the embedded IPv6
 *          header of the original (offending) segment
 * @offset: offset from skb->data to the embedded TCP header
 * @info:   ICMP-type-specific data (the new MTU for PKT_TOOBIG)
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	/* Find the socket the offending segment belonged to. */
	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	/* timewait sockets only carry a ref we must drop; nothing to do. */
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	/* request socks are handled (and released) by tcp_req_err(). */
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	/* PKT_TOOBIG is still processed when user owns the socket (it is
	 * deferred below); everything else is dropped, but counted.
	 */
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* IP_MINTTL-style protection: ignore errors from too-far senders. */
	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	/* The quoted sequence must fall inside our unacked window,
	 * otherwise the ICMP does not match anything we sent.
	 */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			/* Socket busy: defer to release_sock(); the extra
			 * hold keeps sk alive until the deferred work runs.
			 */
			sock_hold(sk);
		goto out;
	}


	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* Established etc.: report as hard error only if the application
	 * asked for it (IPV6_RECVERR), otherwise as soft error.
	 */
	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
459 
460 
/* Build and transmit a SYN-ACK for a pending connection request.
 *
 * @dst: optional pre-resolved route; looked up via inet6_csk_route_req()
 *       when NULL
 * @foc: Fast Open cookie to embed, if any
 *
 * Returns 0 / net_xmit code on (attempted) transmission, -ENOMEM when
 * no route or no skb could be obtained.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* Reflect the client's flow label when IPV6_FLOWINFO
		 * reflection is enabled and we kept its SYN around.
		 */
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		/* np->opt is RCU-protected; per-request options, when
		 * present, take precedence.
		 */
		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
501 
502 
503 static void tcp_v6_reqsk_destructor(struct request_sock *req)
504 {
505         kfree(inet_rsk(req)->ipv6_opt);
506         kfree_skb(inet_rsk(req)->pktopts);
507 }
508 
509 #ifdef CONFIG_TCP_MD5SIG
/* Find the TCP-MD5 key configured on @sk for IPv6 peer @addr, or NULL. */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
                                                   const struct in6_addr *addr)
{
        return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}
515 
516 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
517                                                 const struct sock *addr_sk)
518 {
519         return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
520 }
521 
522 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
523                                  int optlen)
524 {
525         struct tcp_md5sig cmd;
526         struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
527 
528         if (optlen < sizeof(cmd))
529                 return -EINVAL;
530 
531         if (copy_from_user(&cmd, optval, sizeof(cmd)))
532                 return -EFAULT;
533 
534         if (sin6->sin6_family != AF_INET6)
535                 return -EINVAL;
536 
537         if (!cmd.tcpm_keylen) {
538                 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
539                         return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
540                                               AF_INET);
541                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
542                                       AF_INET6);
543         }
544 
545         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
546                 return -EINVAL;
547 
548         if (ipv6_addr_v4mapped(&sin6->sin6_addr))
549                 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
550                                       AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
551 
552         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
553                               AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
554 }
555 
556 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
557                                    const struct in6_addr *daddr,
558                                    const struct in6_addr *saddr,
559                                    const struct tcphdr *th, int nbytes)
560 {
561         struct tcp6_pseudohdr *bp;
562         struct scatterlist sg;
563         struct tcphdr *_th;
564 
565         bp = hp->scratch;
566         /* 1. TCP pseudo-header (RFC2460) */
567         bp->saddr = *saddr;
568         bp->daddr = *daddr;
569         bp->protocol = cpu_to_be32(IPPROTO_TCP);
570         bp->len = cpu_to_be32(nbytes);
571 
572         _th = (struct tcphdr *)(bp + 1);
573         memcpy(_th, th, sizeof(*th));
574         _th->check = 0;
575 
576         sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
577         ahash_request_set_crypt(hp->md5_req, &sg, NULL,
578                                 sizeof(*bp) + sizeof(*th));
579         return crypto_ahash_update(hp->md5_req);
580 }
581 
582 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
583                                const struct in6_addr *daddr, struct in6_addr *saddr,
584                                const struct tcphdr *th)
585 {
586         struct tcp_md5sig_pool *hp;
587         struct ahash_request *req;
588 
589         hp = tcp_get_md5sig_pool();
590         if (!hp)
591                 goto clear_hash_noput;
592         req = hp->md5_req;
593 
594         if (crypto_ahash_init(req))
595                 goto clear_hash;
596         if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
597                 goto clear_hash;
598         if (tcp_md5_hash_key(hp, key))
599                 goto clear_hash;
600         ahash_request_set_crypt(req, NULL, md5_hash, 0);
601         if (crypto_ahash_final(req))
602                 goto clear_hash;
603 
604         tcp_put_md5sig_pool();
605         return 0;
606 
607 clear_hash:
608         tcp_put_md5sig_pool();
609 clear_hash_noput:
610         memset(md5_hash, 0, 16);
611         return 1;
612 }
613 
614 static int tcp_v6_md5_hash_skb(char *md5_hash,
615                                const struct tcp_md5sig_key *key,
616                                const struct sock *sk,
617                                const struct sk_buff *skb)
618 {
619         const struct in6_addr *saddr, *daddr;
620         struct tcp_md5sig_pool *hp;
621         struct ahash_request *req;
622         const struct tcphdr *th = tcp_hdr(skb);
623 
624         if (sk) { /* valid for establish/request sockets */
625                 saddr = &sk->sk_v6_rcv_saddr;
626                 daddr = &sk->sk_v6_daddr;
627         } else {
628                 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
629                 saddr = &ip6h->saddr;
630                 daddr = &ip6h->daddr;
631         }
632 
633         hp = tcp_get_md5sig_pool();
634         if (!hp)
635                 goto clear_hash_noput;
636         req = hp->md5_req;
637 
638         if (crypto_ahash_init(req))
639                 goto clear_hash;
640 
641         if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
642                 goto clear_hash;
643         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
644                 goto clear_hash;
645         if (tcp_md5_hash_key(hp, key))
646                 goto clear_hash;
647         ahash_request_set_crypt(req, NULL, md5_hash, 0);
648         if (crypto_ahash_final(req))
649                 goto clear_hash;
650 
651         tcp_put_md5sig_pool();
652         return 0;
653 
654 clear_hash:
655         tcp_put_md5sig_pool();
656 clear_hash_noput:
657         memset(md5_hash, 0, 16);
658         return 1;
659 }
660 
661 #endif
662 
663 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
664                                     const struct sk_buff *skb)
665 {
666 #ifdef CONFIG_TCP_MD5SIG
667         const __u8 *hash_location = NULL;
668         struct tcp_md5sig_key *hash_expected;
669         const struct ipv6hdr *ip6h = ipv6_hdr(skb);
670         const struct tcphdr *th = tcp_hdr(skb);
671         int genhash;
672         u8 newhash[16];
673 
674         hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
675         hash_location = tcp_parse_md5sig_option(th);
676 
677         /* We've parsed the options - do we have a hash? */
678         if (!hash_expected && !hash_location)
679                 return false;
680 
681         if (hash_expected && !hash_location) {
682                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
683                 return true;
684         }
685 
686         if (!hash_expected && hash_location) {
687                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
688                 return true;
689         }
690 
691         /* check the signature */
692         genhash = tcp_v6_md5_hash_skb(newhash,
693                                       hash_expected,
694                                       NULL, skb);
695 
696         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
697                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
698                 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
699                                      genhash ? "failed" : "mismatch",
700                                      &ip6h->saddr, ntohs(th->source),
701                                      &ip6h->daddr, ntohs(th->dest));
702                 return true;
703         }
704 #endif
705         return false;
706 }
707 
708 static void tcp_v6_init_req(struct request_sock *req,
709                             const struct sock *sk_listener,
710                             struct sk_buff *skb)
711 {
712         struct inet_request_sock *ireq = inet_rsk(req);
713         const struct ipv6_pinfo *np = inet6_sk(sk_listener);
714 
715         ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
716         ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
717 
718         /* So that link locals have meaning */
719         if (!sk_listener->sk_bound_dev_if &&
720             ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
721                 ireq->ir_iif = tcp_v6_iif(skb);
722 
723         if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
724             (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
725              np->rxopt.bits.rxinfo ||
726              np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
727              np->rxopt.bits.rxohlim || np->repflow)) {
728                 atomic_inc(&skb->users);
729                 ireq->pktopts = skb;
730         }
731 }
732 
733 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
734                                           struct flowi *fl,
735                                           const struct request_sock *req,
736                                           bool *strict)
737 {
738         if (strict)
739                 *strict = true;
740         return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
741 }
742 
/* Generic request-sock operations for TCP over IPv6; plugged into the
 * listener's SYN-handling path (retransmit SYN-ACK, ack/reset senders,
 * and the destructor that frees per-request IPv6 state).
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
752 
/* IPv6-specific request-sock callbacks: ISN/timestamp-offset seeding,
 * route lookup, SYN-ACK transmission and (optionally) MD5 and
 * syncookie support.  mss_clamp reserves room for the fixed TCP and
 * IPv6 headers within the minimum IPv6 MTU.
 */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
769 
/* Build and send a bare reply segment (RST when @rst is set, otherwise
 * an ACK) for the incoming segment @skb.
 *
 * The reply is built on a fresh skb and sent through the per-netns TCP
 * control socket (net->ipv6.tcp_sk), so it also works when @sk is NULL
 * or not a full socket.  Addresses and ports are swapped relative to
 * @skb; @seq, @ack and @win fill the TCP header; a timestamp option is
 * emitted when @tsecr is nonzero and (CONFIG_TCP_MD5SIG) an MD5
 * signature option when @key is non-NULL.  @oif is an optional output
 * interface, @tclass and @label set the reply's IPv6 traffic class and
 * flow label.
 */
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	/* Header length grows by one aligned option block per option used */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* ACK is set on every reply except a RST answering a segment that
	 * itself carried ACK.
	 */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	/* Options start immediately after the fixed header */
	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* Sign with the reply's addressing: the incoming saddr is
		 * our destination and the incoming daddr our source.
		 */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Bind the lookup to the incoming interface when the destination
	 * requires a strict device match and no @oif was supplied.
	 */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* The control socket is passed to ip6_dst_lookup_flow() only so
	 * the underlying function can retrieve the network namespace;
	 * the addressing comes entirely from fl6.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	/* Route lookup failed: drop the reply silently */
	kfree_skb(buff);
}
874 
/* Send a RST in reply to the incoming segment @skb (never in reply to
 * a RST).  Sequence numbers follow RFC 793: if the segment carried ACK,
 * our seq echoes its ack_seq; otherwise we ACK everything the segment
 * covered.  With CONFIG_TCP_MD5SIG and no full socket available, the
 * incoming signature is validated against a listener's key first, and
 * no RST is sent when validation fails.
 */
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	/* Never answer a RST with a RST */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/* The active side is lost.  Try to find a listening socket
		 * through the source port, then find the md5 key through
		 * that listening socket.  We do not lose any security here:
		 * the incoming packet is checked against the found key, and
		 * no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, NULL, 0,
					   &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
943 
944 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
945                             u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
946                             struct tcp_md5sig_key *key, u8 tclass,
947                             __be32 label)
948 {
949         tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
950                              tclass, label);
951 }
952 
953 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
954 {
955         struct inet_timewait_sock *tw = inet_twsk(sk);
956         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
957 
958         tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
959                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
960                         tcp_time_stamp + tcptw->tw_ts_offset,
961                         tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
962                         tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
963 
964         inet_twsk_put(tw);
965 }
966 
/* ACK an incoming segment on behalf of a request socket, using the
 * sequence numbers, window and timestamps recorded in @req, signed
 * with the matching MD5 key if the listener has one for this peer.
 */
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
987 
988 
/* With CONFIG_SYN_COOKIES, run a non-SYN segment on a listener through
 * cookie_v6_check(); otherwise (or for SYN segments) @sk is returned
 * unchanged.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (!tcp_hdr(skb)->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
999 
1000 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1001 {
1002         if (skb->protocol == htons(ETH_P_IP))
1003                 return tcp_v4_conn_request(sk, skb);
1004 
1005         if (!ipv6_unicast_destination(skb))
1006                 goto drop;
1007 
1008         return tcp_conn_request(&tcp6_request_sock_ops,
1009                                 &tcp_request_sock_ipv6_ops, sk, skb);
1010 
1011 drop:
1012         tcp_listendrop(sk);
1013         return 0; /* don't send reset */
1014 }
1015 
/* Undo tcp_v6_fill_cb(): copy the saved IPv6 control-block data back
 * to its canonical location in skb->cb.
 *
 * We need to move header back to the beginning if xfrm6_policy_check()
 * and tcp_v6_fill_cb() are going to be called again.
 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
 */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
1025 
/* Create the child socket for a completed connection request.
 *
 * For an IPv4 packet arriving on a v6 socket (the v6-mapped case) the
 * work is delegated to tcp_v4_syn_recv_sock() and the resulting child
 * is switched to the mapped-IPv4 operation tables.  Otherwise a native
 * IPv6 child is created from @req: addresses, IPv6 options and (with
 * CONFIG_TCP_MD5SIG) the peer's MD5 key are copied over, the child is
 * hashed, and SYN pktoptions are transferred when we own the request.
 *
 * Returns the new socket, or NULL on failure (accept queue full, no
 * route, allocation failure, or __inet_inherit_port() failure); drops
 * are accounted via tcp_listendrop()/LINUX_MIB_LISTENOVERFLOWS.
 */
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		/* Switch the child to the mapped-IPv4 operation tables */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* Do not share the listener's per-socket IPv6 state that
		 * the memcpy() above copied wholesale.
		 */
		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt         = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	/* Addresses and binding come from the request socket */
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt        = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
1233 
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (tcp_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			/* Drop the cached rx route if it no longer matches
			 * the incoming interface or fails its validity
			 * check.
			 */
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		/* nsk != sk means a cookie-validated child was created */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
1374 
/* Populate TCP_SKB_CB(skb) from the TCP header, saving the IPv6
 * control block first.
 */
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	/* SYN and FIN each occupy one unit of sequence space */
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}
1396 
/* Main IPv6 TCP receive routine.
 *
 * Validates header length and checksum, looks the segment up in the
 * socket hash tables and dispatches it: NEW_SYN_RECV request sockets
 * go through tcp_check_req(), TIME_WAIT minisockets through
 * tcp_timewait_state_process(), and full sockets are processed
 * directly, prequeued or backlogged depending on lock ownership.
 * Segments that match no socket are answered with a RST.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* Reload header pointers after the pulls above */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		/* The listener left LISTEN state under us: drop the
		 * request and retry the lookup.
		 */
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	/* Reload pointers: tcp_filter() may have modified the skb */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new SYN hit a TIME_WAIT socket: if a listener exists,
		 * dispose of the timewait sock and process the SYN on the
		 * listener instead.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1588 
/* Early demux: look the segment up in the established hash and, on a
 * hit, attach the socket (and its still-valid cached rx route, if any)
 * to @skb before the regular receive path runs.
 */
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			/* Only reuse the cached route when it is still tied
			 * to the interface this packet arrived on.
			 */
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
1626 
/* TIME-WAIT socket handling for TCP/IPv6; shares the generic TCP
 * uniqueness/destructor hooks but sizes the object for IPv6 addresses.
 */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1632 
/* AF-specific connection ops used when a TCPv6 socket talks to a native
 * IPv6 peer (contrast with ipv6_mapped below for v4-mapped addresses).
 */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header    = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1652 
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 (RFC 2385) helpers for native IPv6 connections. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1660 
/*
 *	TCP over IPv4 via INET6 API
 *
 *	Used for an AF_INET6 socket whose peer is a v4-mapped address: the
 *	wire protocol is IPv4 (ip_queue_xmit, tcp_v4_* helpers, iphdr) while
 *	the socket-level option/address handling stays IPv6.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header    = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
1682 
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 helpers for v4-mapped connections: hashing uses the IPv4
 * routines, key parsing stays with the IPv6 sockopt parser.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1690 
1691 /* NOTE: A lot of things set to zero explicitly by call to
1692  *       sk_alloc() so need not be done here.
1693  */
1694 static int tcp_v6_init_sock(struct sock *sk)
1695 {
1696         struct inet_connection_sock *icsk = inet_csk(sk);
1697 
1698         tcp_init_sock(sk);
1699 
1700         icsk->icsk_af_ops = &ipv6_specific;
1701 
1702 #ifdef CONFIG_TCP_MD5SIG
1703         tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1704 #endif
1705 
1706         return 0;
1707 }
1708 
/* Socket teardown: generic TCP destruction first, then release
 * IPv6-level state.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1714 
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */

/* Emit one /proc/net/tcp6 line for a pending (SYN_RECV) request socket.
 * The column layout must stay in lock-step with get_tcp6_sock() and
 * get_timewait6_sock(); userspace parses these fields positionally.
 */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	/* Remaining lifetime of the request; clamp transient negatives. */
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1748 
/* Emit one /proc/net/tcp6 line for a full TCP socket.
 * Reads are lockless, so several fields may be transiently inconsistent;
 * the code compensates where it matters (see rx_queue clamp below).
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	/* Encode which timer is pending in the "tr" column:
	 * 1 = retransmit/RACK-reo/loss-probe, 4 = zero-window probe,
	 * 2 = sk_timer (presumably keepalive here -- confirm), 0 = none.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   /* last column: fastopen queue limit for listeners,
		    * otherwise ssthresh (-1 while still in initial
		    * slow start).
		    */
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
1819 
/* Emit one /proc/net/tcp6 line for a TIME-WAIT socket.  Most per-socket
 * counters do not exist for a timewait sock, so they print as zeros;
 * timer code 3 marks the timewait expiry timer.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	/* Remaining timewait lifetime; may be negative transiently and is
	 * normalized by jiffies_delta_to_clock_t() below.
	 */
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
1844 
1845 static int tcp6_seq_show(struct seq_file *seq, void *v)
1846 {
1847         struct tcp_iter_state *st;
1848         struct sock *sk = v;
1849 
1850         if (v == SEQ_START_TOKEN) {
1851                 seq_puts(seq,
1852                          "  sl  "
1853                          "local_address                         "
1854                          "remote_address                        "
1855                          "st tx_queue rx_queue tr tm->when retrnsmt"
1856                          "   uid  timeout inode\n");
1857                 goto out;
1858         }
1859         st = seq->private;
1860 
1861         if (sk->sk_state == TCP_TIME_WAIT)
1862                 get_timewait6_sock(seq, v, st->num);
1863         else if (sk->sk_state == TCP_NEW_SYN_RECV)
1864                 get_openreq6(seq, v, st->num);
1865         else
1866                 get_tcp6_sock(seq, v, st->num);
1867 out:
1868         return 0;
1869 }
1870 
/* File operations backing /proc/net/tcp6; iteration itself is driven by
 * the shared tcp_seq_open() machinery.
 */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
1878 
/* Registration record tying the "tcp6" proc entry to AF_INET6 sockets and
 * the show callback above.
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
1887 
/* Per-netns registration of /proc/net/tcp6; returns 0 or a negative errno
 * from tcp_proc_register().
 */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1892 
/* Per-netns removal of /proc/net/tcp6. */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
1898 
/* Protocol descriptor for AF_INET6/SOCK_STREAM sockets.  Most operations
 * are the shared TCP implementations; the v6-specific entry points are
 * init/destroy, connect, backlog_rcv and hashing.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
1943 
/* IPv6 transport-protocol hooks for IPPROTO_TCP: packet input, ICMPv6
 * error handling, and the early-demux fast path.
 */
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1950 
/* socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP) dispatch entry. */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
1959 
/* Per-netns setup: create the kernel control socket used to send
 * resets/ACKs on behalf of this namespace.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
1965 
/* Per-netns teardown counterpart of tcpv6_net_init(). */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
1970 
/* Batched netns exit: flush any remaining IPv6 timewait sockets from the
 * global TCP hash once per batch of dying namespaces.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}
1975 
/* Per-network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
1981 
/* Module/boot-time init: register the IPv6 protocol handler, the protosw
 * entry, and the pernet ops, unwinding in reverse order on failure.
 * Returns 0 on success or a negative errno.
 */
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

	/* Error unwind: undo registrations in reverse order. */
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}
2007 
/* Module exit: tear down in the reverse order of tcpv6_init(). */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}
2014 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp