
TOMOYO Linux Cross Reference
Linux/net/ipv6/tcp_ipv6.c


/*
 *      TCP over IPv6
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *      Based on:
 *      linux/net/ipv4/tcp.c
 *      linux/net/ipv4/tcp_input.c
 *      linux/net/ipv4/tcp_output.c
 *
 *      Fixes:
 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 *      YOSHIFUJI Hideaki @USAGI and:   Support the IPV6_V6ONLY socket option,
 *      Alexey Kuznetsov                which allows IPv4 and IPv6 sockets to
 *                                      bind to a single port at the same time.
 *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void     tcp_v6_send_reset(struct sk_buff *skb);
static void     tcp_v6_or_send_ack(struct sk_buff *skb, struct open_request *req);
static void     tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
                                  struct sk_buff *skb);

static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static int      tcp_v6_xmit(struct sk_buff *skb, int ipfragok);

static struct tcp_func ipv6_mapped;
static struct tcp_func ipv6_specific;

/* I have no idea if this is a good hash for v6 or not. -DaveM */
static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
                                    struct in6_addr *faddr, u16 fport)
{
        int hashent = (lport ^ fport);

        hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
        hashent ^= hashent>>16;
        hashent ^= hashent>>8;
        return (hashent & (tcp_ehash_size - 1));
}
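
/* A worked example of the mixing above: only the low 32 bits of each
 * address feed the hash.  With lport = 0x1000, fport = 0x0050 and a
 * pair of addresses sharing the same low word (say ::1 talking to
 * ::1, so the s6_addr32[3] XOR contributes nothing):
 *
 *      hashent  = 0x1000 ^ 0x0050      -> 0x1050
 *      hashent ^= 0x1050 >> 16         -> 0x1050
 *      hashent ^= 0x1050 >> 8          -> 0x1040
 *
 * and the bucket is 0x1040 & (tcp_ehash_size - 1); the mask assumes
 * tcp_ehash_size is a power of two.
 */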

static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
{
        struct inet_opt *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *laddr = &np->rcv_saddr;
        struct in6_addr *faddr = &np->daddr;
        __u16 lport = inet->num;
        __u16 fport = inet->dport;
        return tcp_v6_hashfn(laddr, lport, faddr, fport);
}

static inline int tcp_v6_bind_conflict(struct sock *sk,
                                       struct tcp_bind_bucket *tb)
{
        struct sock *sk2;
        struct hlist_node *node;

        /* We must walk the whole port owner list in this case. -DaveM */
        sk_for_each_bound(sk2, node, &tb->owners) {
                if (sk != sk2 &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
                    (!sk->sk_reuse || !sk2->sk_reuse ||
                     sk2->sk_state == TCP_LISTEN) &&
                     ipv6_rcv_saddr_equal(sk, sk2))
                        break;
        }

        return node != NULL;
}
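
/* Put differently, sk2 conflicts with sk only when all three hold:
 * the sockets are either unbound to devices or bound to the same one
 * (sk_bound_dev_if); SO_REUSEADDR cannot rescue them (one side lacks
 * sk_reuse, or sk2 is already in TCP_LISTEN); and
 * ipv6_rcv_saddr_equal() reports that the local addresses overlap.
 */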

/* Grrr, addr_type already calculated by caller, but I don't want
 * to add some silly "cookie" argument to this method just for that.
 * But it doesn't matter, the recalculation is in the rarest path
 * this function ever takes.
 */
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
        struct tcp_bind_hashbucket *head;
        struct tcp_bind_bucket *tb;
        struct hlist_node *node;
        int ret;

        local_bh_disable();
        if (snum == 0) {
                int low = sysctl_local_port_range[0];
                int high = sysctl_local_port_range[1];
                int remaining = (high - low) + 1;
                int rover;

                spin_lock(&tcp_portalloc_lock);
                rover = tcp_port_rover;
                do {    rover++;
                        if ((rover < low) || (rover > high))
                                rover = low;
                        head = &tcp_bhash[tcp_bhashfn(rover)];
                        spin_lock(&head->lock);
                        tb_for_each(tb, node, &head->chain)
                                if (tb->port == rover)
                                        goto next;
                        break;
                next:
                        spin_unlock(&head->lock);
                } while (--remaining > 0);
                tcp_port_rover = rover;
                spin_unlock(&tcp_portalloc_lock);

                /* Exhausted local port range during search? */
                ret = 1;
                if (remaining <= 0)
                        goto fail;

                /* OK, here is the one we will use. */
                snum = rover;
        } else {
                head = &tcp_bhash[tcp_bhashfn(snum)];
                spin_lock(&head->lock);
                tb_for_each(tb, node, &head->chain)
                        if (tb->port == snum)
                                goto tb_found;
        }
        tb = NULL;
        goto tb_not_found;
tb_found:
        if (tb && !hlist_empty(&tb->owners)) {
                if (tb->fastreuse > 0 && sk->sk_reuse &&
                    sk->sk_state != TCP_LISTEN) {
                        goto success;
                } else {
                        ret = 1;
                        if (tcp_v6_bind_conflict(sk, tb))
                                goto fail_unlock;
                }
        }
tb_not_found:
        ret = 1;
        if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
                goto fail_unlock;
        if (hlist_empty(&tb->owners)) {
                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
                        tb->fastreuse = 1;
                else
                        tb->fastreuse = 0;
        } else if (tb->fastreuse &&
                   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
                tb->fastreuse = 0;

success:
        if (!tcp_sk(sk)->bind_hash)
                tcp_bind_hash(sk, tb, snum);
        BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
        ret = 0;

fail_unlock:
        spin_unlock(&head->lock);
fail:
        local_bh_enable();
        return ret;
}
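
/* Port selection in a nutshell: with snum == 0 we probe linearly from
 * one past tcp_port_rover, wrapping inside sysctl_local_port_range
 * (typically something like 32768..61000, but configuration
 * dependent), and take the first port that has no bind bucket at all
 * -- such a port cannot conflict with anybody.  An explicitly
 * requested port either hits the fastreuse shortcut or goes through
 * the full tcp_v6_bind_conflict() walk above.
 */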

static __inline__ void __tcp_v6_hash(struct sock *sk)
{
        struct hlist_head *list;
        rwlock_t *lock;

        BUG_TRAP(sk_unhashed(sk));

        if (sk->sk_state == TCP_LISTEN) {
                list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
                lock = &tcp_lhash_lock;
                tcp_listen_wlock();
        } else {
                sk->sk_hashent = tcp_v6_sk_hashfn(sk);
                list = &tcp_ehash[sk->sk_hashent].chain;
                lock = &tcp_ehash[sk->sk_hashent].lock;
                write_lock(lock);
        }

        __sk_add_node(sk, list);
        sock_prot_inc_use(sk->sk_prot);
        write_unlock(lock);
}


static void tcp_v6_hash(struct sock *sk)
{
        if (sk->sk_state != TCP_CLOSE) {
                struct tcp_opt *tp = tcp_sk(sk);

                if (tp->af_specific == &ipv6_mapped) {
                        tcp_prot.hash(sk);
                        return;
                }
                local_bh_disable();
                __tcp_v6_hash(sk);
                local_bh_enable();
        }
}

static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, int dif)
{
        struct sock *sk;
        struct hlist_node *node;
        struct sock *result = NULL;
        int score, hiscore;

        hiscore = 0;
        read_lock(&tcp_lhash_lock);
        sk_for_each(sk, node, &tcp_listening_hash[tcp_lhashfn(hnum)]) {
                if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        score = 1;
                        if (!ipv6_addr_any(&np->rcv_saddr)) {
                                if (ipv6_addr_cmp(&np->rcv_saddr, daddr))
                                        continue;
                                score++;
                        }
                        if (sk->sk_bound_dev_if) {
                                if (sk->sk_bound_dev_if != dif)
                                        continue;
                                score++;
                        }
                        if (score == 3) {
                                result = sk;
                                break;
                        }
                        if (score > hiscore) {
                                hiscore = score;
                                result = sk;
                        }
                }
        }
        if (result)
                sock_hold(result);
        read_unlock(&tcp_lhash_lock);
        return result;
}
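
/* Scoring above: every PF_INET6 socket on the right port starts at 1,
 * +1 for a matching bound address, +1 for a matching bound device.
 * A score of 3 is a fully specified listener and ends the walk early;
 * otherwise the best partial match (e.g. an in6addr_any listener)
 * wins.
 */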

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * The sockhash lock must be held as a reader here.
 */

static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u16 sport,
                                                       struct in6_addr *daddr, u16 hnum,
                                                       int dif)
{
        struct tcp_ehash_bucket *head;
        struct sock *sk;
        struct hlist_node *node;
        __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
        int hash;

        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
        hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
        head = &tcp_ehash[hash];
        read_lock(&head->lock);
        sk_for_each(sk, node, &head->chain) {
                /* For IPV6 do the cheaper port and family tests first. */
                if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
                        goto hit; /* You sunk my battleship! */
        }
        /* Must check for a TIME_WAIT'er before going to listener hash. */
        sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
                /* FIXME: acme: check this... */
                struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

                if(*((__u32 *)&(tw->tw_dport))  == ports        &&
                   sk->sk_family                == PF_INET6) {
                        if(!ipv6_addr_cmp(&tw->tw_v6_daddr, saddr)      &&
                           !ipv6_addr_cmp(&tw->tw_v6_rcv_saddr, daddr)  &&
                           (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
                                goto hit;
                }
        }
        read_unlock(&head->lock);
        return NULL;

hit:
        sock_hold(sk);
        read_unlock(&head->lock);
        return sk;
}


static inline struct sock *__tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
                                           struct in6_addr *daddr, u16 hnum,
                                           int dif)
{
        struct sock *sk;

        sk = __tcp_v6_lookup_established(saddr, sport, daddr, hnum, dif);

        if (sk)
                return sk;

        return tcp_v6_lookup_listener(daddr, hnum, dif);
}

inline struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
                                  struct in6_addr *daddr, u16 dport,
                                  int dif)
{
        struct sock *sk;

        local_bh_disable();
        sk = __tcp_v6_lookup(saddr, sport, daddr, ntohs(dport), dif);
        local_bh_enable();

        return sk;
}
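
#if 0
/* Illustrative only: this is how a caller such as the ICMPv6 error
 * handler demuxes a packet (cf. tcp_v6_err() below).  Established and
 * TIME-WAIT buckets are tried first, listeners last, and the caller
 * owns a reference on the returned socket. */
static struct sock *demux_example(struct sk_buff *skb, struct tcphdr *th)
{
        return tcp_v6_lookup(&skb->nh.ipv6h->saddr, th->source,
                             &skb->nh.ipv6h->daddr, th->dest,
                             skb->dev->ifindex);
}
#endif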


/*
 * Open request hash tables.
 */

static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
{
        u32 a, b, c;

        a = raddr->s6_addr32[0];
        b = raddr->s6_addr32[1];
        c = raddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += rnd;
        __jhash_mix(a, b, c);

        a += raddr->s6_addr32[3];
        b += (u32) rport;
        __jhash_mix(a, b, c);

        return c & (TCP_SYNQ_HSIZE - 1);
}

static struct open_request *tcp_v6_search_req(struct tcp_opt *tp,
                                              struct open_request ***prevp,
                                              __u16 rport,
                                              struct in6_addr *raddr,
                                              struct in6_addr *laddr,
                                              int iif)
{
        struct tcp_listen_opt *lopt = tp->listen_opt;
        struct open_request *req, **prev;

        for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
                if (req->rmt_port == rport &&
                    req->class->family == AF_INET6 &&
                    !ipv6_addr_cmp(&req->af.v6_req.rmt_addr, raddr) &&
                    !ipv6_addr_cmp(&req->af.v6_req.loc_addr, laddr) &&
                    (!req->af.v6_req.iif || req->af.v6_req.iif == iif)) {
                        BUG_TRAP(req->sk == NULL);
                        *prevp = prev;
                        return req;
                }
        }

        return NULL;
}

static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
                                   struct in6_addr *saddr,
                                   struct in6_addr *daddr,
                                   unsigned long base)
{
        return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
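
/* csum_ipv6_magic() folds the IPv6 pseudo-header -- source address,
 * destination address, upper-layer length and next header (here
 * IPPROTO_TCP) -- into 'base', which the callers seed with the sum of
 * the TCP header and payload.  The same helper therefore serves the
 * software checksum path, the SYN|ACK path and the RST/ACK replies
 * below.
 */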

static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IPV6)) {
                return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
                                                    skb->nh.ipv6h->saddr.s6_addr32,
                                                    skb->h.th->dest,
                                                    skb->h.th->source);
        } else {
                return secure_tcp_sequence_number(skb->nh.iph->daddr,
                                                  skb->nh.iph->saddr,
                                                  skb->h.th->dest,
                                                  skb->h.th->source);
        }
}

static int tcp_v6_check_established(struct sock *sk)
{
        struct inet_opt *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *daddr = &np->rcv_saddr;
        struct in6_addr *saddr = &np->daddr;
        int dif = sk->sk_bound_dev_if;
        u32 ports = TCP_COMBINED_PORTS(inet->dport, inet->num);
        int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
        struct tcp_ehash_bucket *head = &tcp_ehash[hash];
        struct sock *sk2;
        struct hlist_node *node;
        struct tcp_tw_bucket *tw;

        write_lock_bh(&head->lock);

        /* Check TIME-WAIT sockets first. */
        sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
                tw = (struct tcp_tw_bucket*)sk2;

                if(*((__u32 *)&(tw->tw_dport))  == ports        &&
                   sk2->sk_family               == PF_INET6     &&
                   !ipv6_addr_cmp(&tw->tw_v6_daddr, saddr)      &&
                   !ipv6_addr_cmp(&tw->tw_v6_rcv_saddr, daddr)  &&
                   sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
                        struct tcp_opt *tp = tcp_sk(sk);

                        if (tw->tw_ts_recent_stamp) {
                                /* See comment in tcp_ipv4.c */
                                tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
                                if (!tp->write_seq)
                                        tp->write_seq = 1;
                                tp->ts_recent = tw->tw_ts_recent;
                                tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
                                sock_hold(sk2);
                                goto unique;
                        } else
                                goto not_unique;
                }
        }
        tw = NULL;

        /* And established part... */
        sk_for_each(sk2, node, &head->chain) {
                if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
                        goto not_unique;
        }

unique:
        BUG_TRAP(sk_unhashed(sk));
        sk_add_node(sk, &head->chain);
        sk->sk_hashent = hash;
        sock_prot_inc_use(sk->sk_prot);
        write_unlock_bh(&head->lock);

        if (tw) {
                /* Silly. Should hash-dance instead... */
                local_bh_disable();
                tcp_tw_deschedule(tw);
                NET_INC_STATS_BH(TimeWaitRecycled);
                local_bh_enable();

                tcp_tw_put(tw);
        }
        return 0;

not_unique:
        write_unlock_bh(&head->lock);
        return -EADDRNOTAVAIL;
}
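
/* The TIME-WAIT recycling above mirrors the IPv4 logic: if the old
 * incarnation left recent timestamps behind, PAWS shields the new
 * connection from stray old segments, so reusing the 4-tuple is safe.
 * write_seq starts 65535 + 2 past the old snd_nxt so the new sequence
 * space cannot collide with anything the peer may still have queued.
 */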

static int tcp_v6_hash_connect(struct sock *sk)
{
        struct tcp_bind_hashbucket *head;
        struct tcp_bind_bucket *tb;

        /* XXX */
        if (inet_sk(sk)->num == 0) {
                int err = tcp_v6_get_port(sk, inet_sk(sk)->num);
                if (err)
                        return err;
                inet_sk(sk)->sport = htons(inet_sk(sk)->num);
        }

        head = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
        tb = tb_head(head);

        spin_lock_bh(&head->lock);

        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                __tcp_v6_hash(sk);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
                spin_unlock_bh(&head->lock);
                return tcp_v6_check_established(sk);
        }
}

static __inline__ int tcp_v6_iif(struct sk_buff *skb)
{
        struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
        return opt->iif;
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                          int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_opt *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_opt *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
        int err;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return(-EAFNOSUPPORT);

        memset(&fl, 0, sizeof(fl));

        if (np->sndflow) {
                fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl.fl6_flowlabel);
                if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                        fl6_sock_release(flowlabel);
                }
        }

        /*
         *      connect() to INADDR_ANY means loopback (BSD'ism).
         */

        if(ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 0x1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if(addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type&IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connecting to a link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        if (tp->ts_recent_stamp &&
            ipv6_addr_cmp(&np->daddr, &usin->sin6_addr)) {
                tp->ts_recent = 0;
                tp->ts_recent_stamp = 0;
                tp->write_seq = 0;
        }

        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
        np->flow_label = fl.fl6_flowlabel;

        /*
         *      TCP over IPv4
         */

        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = tp->ext_header_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                tp->af_specific = &ipv6_mapped;
                sk->sk_backlog_rcv = tcp_v4_do_rcv;

                err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

                if (err) {
                        tp->ext_header_len = exthdrlen;
                        tp->af_specific = &ipv6_specific;
                        sk->sk_backlog_rcv = tcp_v6_do_rcv;
                        goto failure;
                } else {
                        ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->saddr);
                        ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->rcv_saddr);
                }

                return err;
        }

        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;

        fl.proto = IPPROTO_TCP;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src,
                       (saddr ? saddr : &np->saddr));
        fl.oif = sk->sk_bound_dev_if;
        fl.fl_ip_dport = usin->sin6_port;
        fl.fl_ip_sport = inet->sport;

        if (np->opt && np->opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
        }

        err = ip6_dst_lookup(sk, &dst, &fl);

        if (err)
                goto failure;

        if (saddr == NULL) {
                saddr = &fl.fl6_src;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }

        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
        inet->rcv_saddr = LOOPBACK4_IPV6;

        ip6_dst_store(sk, dst, NULL);
        sk->sk_route_caps = dst->dev->features &
                ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

        tp->ext_header_len = 0;
        if (np->opt)
                tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
        tp->ext2_header_len = dst->header_len;

        tp->mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

        inet->dport = usin->sin6_port;

        tcp_set_state(sk, TCP_SYN_SENT);
        err = tcp_v6_hash_connect(sk);
        if (err)
                goto late_failure;

        if (!tp->write_seq)
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             np->daddr.s6_addr32,
                                                             inet->sport,
                                                             inet->dport);

        err = tcp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        tcp_set_state(sk, TCP_CLOSE);
        __sk_dst_reset(sk);
failure:
        inet->dport = 0;
        sk->sk_route_caps = 0;
        return err;
}
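
#if 0
/* Illustrative userspace view of the mapped-address branch above, not
 * part of the kernel: an AF_INET6 socket connecting to
 * ::ffff:192.0.2.1 is silently handed to tcp_v4_connect(), and the
 * socket's np->saddr/np->rcv_saddr end up in the same ::ffff:a.b.c.d
 * form (the last 32 bits carry the IPv4 address). */
struct sockaddr_in6 sin6 = {
        .sin6_family = AF_INET6,
        .sin6_port   = htons(80),
};
inet_pton(AF_INET6, "::ffff:192.0.2.1", &sin6.sin6_addr);
connect(fd, (struct sockaddr *)&sin6, sizeof(sin6));
#endif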

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                int type, int code, int offset, __u32 info)
{
        struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
        struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        struct tcp_opt *tp;
        __u32 seq;

        sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

        if (sk == NULL) {
                ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), Icmp6InErrors);
                return;
        }

        if (sk->sk_state == TCP_TIME_WAIT) {
                tcp_tw_put((struct tcp_tw_bucket*)sk);
                return;
        }

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LockDroppedIcmps);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(OutOfWindowIcmps);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                        goto out;

                /* icmp should have updated the destination cache entry */
                dst = __sk_dst_check(sk, np->dst_cookie);

                if (dst == NULL) {
                        struct inet_opt *inet = inet_sk(sk);
                        struct flowi fl;

                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle rthdr case. Ignore this complexity
                           for now.
                         */
                        memset(&fl, 0, sizeof(fl));
                        fl.proto = IPPROTO_TCP;
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
                        fl.fl_ip_dport = inet->dport;
                        fl.fl_ip_sport = inet->sport;

                        if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }
                } else
                        dst_hold(dst);

                if (tp->pmtu_cookie > dst_pmtu(dst)) {
                        tcp_sync_mss(sk, dst_pmtu(dst));
                        tcp_simple_retransmit(sk);
                } /* else let the usual retransmit timer handle it */
                dst_release(dst);
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        /* Might be for an open_request */
        switch (sk->sk_state) {
                struct open_request *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
                                        &hdr->saddr, tcp_v6_iif(skb));
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                 * an established socket here.
                 */
                BUG_TRAP(req->sk == NULL);

                if (seq != req->snt_isn) {
                        NET_INC_STATS_BH(OutOfWindowIcmps);
                        goto out;
                }

                tcp_synq_drop(sk, req, prev);
                goto out;

        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen.
                               It can, if SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        TCP_INC_STATS_BH(TcpAttemptFails);
                        sk->sk_err = err;
                        sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */

                        tcp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}


static int tcp_v6_send_synack(struct sock *sk, struct open_request *req,
                              struct dst_entry *dst)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff * skb;
        struct ipv6_txoptions *opt = NULL;
        struct flowi fl;
        int err = -1;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_TCP;
        ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
        ipv6_addr_copy(&fl.fl6_src, &req->af.v6_req.loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = req->af.v6_req.iif;
        fl.fl_ip_dport = req->rmt_port;
        fl.fl_ip_sport = inet_sk(sk)->sport;

        if (dst == NULL) {
                opt = np->opt;
                if (opt == NULL &&
                    np->rxopt.bits.srcrt == 2 &&
                    req->af.v6_req.pktopts) {
                        struct sk_buff *pktopts = req->af.v6_req.pktopts;
                        struct inet6_skb_parm *rxopt = (struct inet6_skb_parm *)pktopts->cb;
                        if (rxopt->srcrt)
                                opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
                }

                if (opt && opt->srcrt) {
                        struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                }

                err = ip6_dst_lookup(sk, &dst, &fl);
                if (err)
                        goto done;
        }

        skb = tcp_make_synack(sk, dst, req);
        if (skb) {
                struct tcphdr *th = skb->h.th;

                th->check = tcp_v6_check(th, skb->len,
                                         &req->af.v6_req.loc_addr, &req->af.v6_req.rmt_addr,
                                         csum_partial((char *)th, skb->len, skb->csum));

                ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
                err = ip6_xmit(sk, skb, &fl, opt, 0);
                if (err == NET_XMIT_CN)
                        err = 0;
        }

done:
        dst_release(dst);
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        return err;
}

static void tcp_v6_or_free(struct open_request *req)
{
        if (req->af.v6_req.pktopts)
                kfree_skb(req->af.v6_req.pktopts);
}

static struct or_calltable or_ipv6 = {
        .family         =       AF_INET6,
        .rtx_syn_ack    =       tcp_v6_send_synack,
        .send_ack       =       tcp_v6_or_send_ack,
        .destructor     =       tcp_v6_or_free,
        .send_reset     =       tcp_v6_send_reset
};

static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;

        if (np->rxopt.all) {
                if ((opt->hop && np->rxopt.bits.hopopts) ||
                    ((IPV6_FLOWINFO_MASK&*(u32*)skb->nh.raw) &&
                     np->rxopt.bits.rxflow) ||
                    (opt->srcrt && np->rxopt.bits.srcrt) ||
                    ((opt->dst1 || opt->dst0) && np->rxopt.bits.dstopts))
                        return 1;
        }
        return 0;
}


static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
                              struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);

        if (skb->ip_summed == CHECKSUM_HW) {
                th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
                skb->csum = offsetof(struct tcphdr, check);
        } else {
                th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
                                            csum_partial((char *)th, th->doff<<2,
                                                         skb->csum));
        }
}


static void tcp_v6_send_reset(struct sk_buff *skb)
{
        struct tcphdr *th = skb->h.th, *t1;
        struct sk_buff *buff;
        struct flowi fl;

        if (th->rst)
                return;

        if (!ipv6_unicast_destination(skb))
                return;

        /*
         * We need to grab some memory, and put together an RST,
         * and then put it into the queue to be sent.
         */

        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr), GFP_ATOMIC);
        if (buff == NULL)
                return;

        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr));

        t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));

        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = sizeof(*t1)/4;
        t1->rst = 1;

        if(th->ack) {
                t1->seq = th->ack_seq;
        } else {
                t1->ack = 1;
                t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
                                    + skb->len - (th->doff<<2));
        }

        buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

        t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
                                    sizeof(*t1), IPPROTO_TCP,
                                    buff->csum);

        fl.proto = IPPROTO_TCP;
        fl.oif = tcp_v6_iif(skb);
        fl.fl_ip_dport = t1->dest;
        fl.fl_ip_sport = t1->source;

        /* sk = NULL, but it is safe for now. RST socket required. */
        if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
                ip6_xmit(NULL, buff, &fl, NULL, 0);
                TCP_INC_STATS_BH(TcpOutSegs);
                TCP_INC_STATS_BH(TcpOutRsts);
                return;
        }

        kfree_skb(buff);
}
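
/* RST sequence selection above follows RFC 793: if the offending
 * segment carried an ACK, the reset borrows seq = their ack_seq and
 * sets no ACK bit; otherwise seq stays 0 and the reset ACKs exactly
 * the sequence space the segment occupied (SYN and FIN each count
 * for one).
 */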

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
        struct tcphdr *th = skb->h.th, *t1;
        struct sk_buff *buff;
        struct flowi fl;
        int tot_len = sizeof(struct tcphdr);

        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr), GFP_ATOMIC);
        if (buff == NULL)
                return;

        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr));

        if (ts)
                tot_len += 3*4;

        t1 = (struct tcphdr *) skb_push(buff,tot_len);

        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = tot_len/4;
        t1->seq = htonl(seq);
        t1->ack_seq = htonl(ack);
        t1->ack = 1;
        t1->window = htons(win);

        if (ts) {
                u32 *ptr = (u32*)(t1 + 1);
                *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                               (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tcp_time_stamp);
                *ptr = htonl(ts);
        }

        buff->csum = csum_partial((char *)t1, tot_len, 0);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

        t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
                                    tot_len, IPPROTO_TCP,
                                    buff->csum);

        fl.proto = IPPROTO_TCP;
        fl.oif = tcp_v6_iif(skb);
        fl.fl_ip_dport = t1->dest;
        fl.fl_ip_sport = t1->source;

        if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
                ip6_xmit(NULL, buff, &fl, NULL, 0);
                TCP_INC_STATS_BH(TcpOutSegs);
                return;
        }

        kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

        tcp_v6_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
                        tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);

        tcp_tw_put(tw);
}

static void tcp_v6_or_send_ack(struct sk_buff *skb, struct open_request *req)
{
        tcp_v6_send_ack(skb, req->snt_isn+1, req->rcv_isn+1, req->rcv_wnd, req->ts_recent);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
        struct open_request *req, **prev;
        struct tcphdr *th = skb->h.th;
        struct tcp_opt *tp = tcp_sk(sk);
        struct sock *nsk;

        /* Find possible connection requests. */
        req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr,
                                &skb->nh.ipv6h->daddr, tcp_v6_iif(skb));
        if (req)
                return tcp_check_req(sk, skb, req, prev);

        nsk = __tcp_v6_lookup_established(&skb->nh.ipv6h->saddr,
                                          th->source,
                                          &skb->nh.ipv6h->daddr,
                                          ntohs(th->dest),
                                          tcp_v6_iif(skb));

        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                tcp_tw_put((struct tcp_tw_bucket*)nsk);
                return NULL;
        }

#if 0 /*def CONFIG_SYN_COOKIES*/
        if (!th->rst && !th->syn && th->ack)
                sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
        return sk;
}

static void tcp_v6_synq_add(struct sock *sk, struct open_request *req)
{
        struct tcp_opt *tp = tcp_sk(sk);
        struct tcp_listen_opt *lopt = tp->listen_opt;
        u32 h = tcp_v6_synq_hash(&req->af.v6_req.rmt_addr, req->rmt_port, lopt->hash_rnd);

        req->sk = NULL;
        req->expires = jiffies + TCP_TIMEOUT_INIT;
        req->retrans = 0;
        req->dl_next = lopt->syn_table[h];

        write_lock(&tp->syn_wait_lock);
        lopt->syn_table[h] = req;
        write_unlock(&tp->syn_wait_lock);

        tcp_synq_added(sk);
}


/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_opt tmptp, *tp = tcp_sk(sk);
        struct open_request *req = NULL;
        __u32 isn = TCP_SKB_CB(skb)->when;

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        /*
         *      There are no SYN attacks on IPv6, yet...
         */
        if (tcp_synq_is_full(sk) && !isn) {
                if (net_ratelimit())
                        printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
                goto drop;
        }

        if (tcp_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
                goto drop;

        req = tcp_openreq_alloc();
        if (req == NULL)
                goto drop;

        tcp_clear_options(&tmptp);
        tmptp.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
        tmptp.user_mss = tp->user_mss;

        tcp_parse_options(skb, &tmptp, 0);

        tmptp.tstamp_ok = tmptp.saw_tstamp;
        tcp_openreq_init(req, &tmptp, skb);

        req->class = &or_ipv6;
        ipv6_addr_copy(&req->af.v6_req.rmt_addr, &skb->nh.ipv6h->saddr);
        ipv6_addr_copy(&req->af.v6_req.loc_addr, &skb->nh.ipv6h->daddr);
        TCP_ECN_create_request(req, skb->h.th);
        req->af.v6_req.pktopts = NULL;
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo ||
            np->rxopt.bits.rxhlim) {
                atomic_inc(&skb->users);
                req->af.v6_req.pktopts = skb;
        }
        req->af.v6_req.iif = sk->sk_bound_dev_if;

        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
            ipv6_addr_type(&req->af.v6_req.rmt_addr) & IPV6_ADDR_LINKLOCAL)
                req->af.v6_req.iif = tcp_v6_iif(skb);

        if (isn == 0)
                isn = tcp_v6_init_sequence(sk,skb);

        req->snt_isn = isn;

        if (tcp_v6_send_synack(sk, req, NULL))
                goto drop;

        tcp_v6_synq_add(sk, req);

        return 0;

drop:
        if (req)
                tcp_openreq_free(req);

        TCP_INC_STATS_BH(TcpAttemptFails);
        return 0; /* don't send reset */
}
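
/* SYN handling in short: allocate an open_request, parse the peer's
 * options against an MSS clamped to the IPv6 minimum MTU, stash any
 * interesting pktoptions, pick (or inherit) the ISN, answer with a
 * SYN|ACK and park the request in the listener's syn_table until the
 * final ACK -- or a timeout -- arrives.
 */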

static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                          struct open_request *req,
                                          struct dst_entry *dst)
{
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct tcp6_sock *newtcp6sk;
        struct inet_opt *newinet;
        struct tcp_opt *newtp;
        struct sock *newsk;
        struct ipv6_txoptions *opt;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped
                 */

                newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

                if (newsk == NULL)
                        return NULL;

                newtcp6sk = (struct tcp6_sock *)newsk;
                newtcp6sk->pinet6 = &newtcp6sk->inet6;

                newinet = inet_sk(newsk);
                newnp = inet6_sk(newsk);
                newtp = tcp_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
                              newinet->daddr);

                ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
                              newinet->saddr);

                ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

                newtp->af_specific = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = tcp_v6_iif(skb);
                newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;

                /* Charge newly allocated IPv6 socket. Though it is mapped,
                 * it is still an IPv6 socket.
                 */
1289                 atomic_inc(&inet6_sock_nr);
1290 #endif
1291 
1292                 /* It is tricky place. Until this moment IPv4 tcp
                /* This is a tricky place. Until this moment IPv4 tcp
                   worked with the IPv6 af_tcp.af_specific.
                   Sync it now.
                 */
                tcp_sync_mss(newsk, newtp->pmtu_cookie);

                return newsk;
        }

        opt = np->opt;

        if (tcp_acceptq_is_full(sk))
                goto out_overflow;

        if (np->rxopt.bits.srcrt == 2 &&
            opt == NULL && req->af.v6_req.pktopts) {
                struct inet6_skb_parm *rxopt = (struct inet6_skb_parm *)req->af.v6_req.pktopts->cb;
                if (rxopt->srcrt)
                        opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(req->af.v6_req.pktopts->nh.raw+rxopt->srcrt));
        }

        if (dst == NULL) {
                struct flowi fl;

                memset(&fl, 0, sizeof(fl));
                fl.proto = IPPROTO_TCP;
                ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
                if (opt && opt->srcrt) {
                        struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                }
                ipv6_addr_copy(&fl.fl6_src, &req->af.v6_req.loc_addr);
                fl.oif = sk->sk_bound_dev_if;
                fl.fl_ip_dport = req->rmt_port;
                fl.fl_ip_sport = inet_sk(sk)->sport;

                if (ip6_dst_lookup(sk, &dst, &fl))
                        goto out;
        }

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out;

        /* Charge newly allocated IPv6 socket */
#ifdef INET_REFCNT_DEBUG
        atomic_inc(&inet6_sock_nr);
#endif

        ip6_dst_store(newsk, dst, NULL);
        newsk->sk_route_caps = dst->dev->features &
                ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

        newtcp6sk = (struct tcp6_sock *)newsk;
        newtcp6sk->pinet6 = &newtcp6sk->inet6;

        newtp = tcp_sk(newsk);
        newinet = inet_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        ipv6_addr_copy(&newnp->daddr, &req->af.v6_req.rmt_addr);
        ipv6_addr_copy(&newnp->saddr, &req->af.v6_req.loc_addr);
        ipv6_addr_copy(&newnp->rcv_saddr, &req->af.v6_req.loc_addr);
        newsk->sk_bound_dev_if = req->af.v6_req.iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->opt = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
        if (req->af.v6_req.pktopts) {
                newnp->pktoptions = skb_clone(req->af.v6_req.pktopts,
                                              GFP_ATOMIC);
                kfree_skb(req->af.v6_req.pktopts);
                req->af.v6_req.pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = tcp_v6_iif(skb);
        newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

        /* Clone native IPv6 options from the listening socket (if any).

           Yes, keeping a reference count would be much more clever,
           but we do one more thing here: reattach the optmem
           to newsk.
         */
        if (opt) {
                newnp->opt = ipv6_dup_options(newsk, opt);
                if (opt != np->opt)
                        sock_kfree_s(sk, opt, opt->tot_len);
        }

        newtp->ext_header_len = 0;
        if (newnp->opt)
                newtp->ext_header_len = newnp->opt->opt_nflen +
                                        newnp->opt->opt_flen;
        newtp->ext2_header_len = dst->header_len;

        tcp_sync_mss(newsk, dst_pmtu(dst));
        newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
        tcp_initialize_rcv_mss(newsk);

        newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

        __tcp_v6_hash(newsk);
        tcp_inherit_port(sk, newsk);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(ListenOverflows);
out:
        NET_INC_STATS_BH(ListenDrops);
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return NULL;
}
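
/* Note on the mapped branch above: the child returned by
 * tcp_v4_syn_recv_sock() is an IPv4 connection wearing IPv6 clothes --
 * its addresses are stored in the ::ffff:a.b.c.d form, af_specific
 * points at ipv6_mapped, and backlog processing goes through
 * tcp_v4_do_rcv().
 */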

static int tcp_v6_checksum_init(struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_HW) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
                                  &skb->nh.ipv6h->daddr,skb->csum))
                        return 0;
                NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "hw tcp v6 csum failed\n"));
        }
        if (skb->len <= 76) {
                if (tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
                                 &skb->nh.ipv6h->daddr,skb_checksum(skb, 0, skb->len, 0)))
                        return -1;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else {
                skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
                                          &skb->nh.ipv6h->daddr,0);
        }
        return 0;
}
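
/* Checksum strategy above: a CHECKSUM_HW packet is verified once and
 * then trusted; short segments (<= 76 bytes) are checksummed in
 * software immediately; anything longer just gets skb->csum seeded
 * with the pseudo-header sum so that tcp_checksum_complete() can
 * finish the job later, and only if the segment is actually used.
 */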

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_opt *tp;
        struct sk_buff *opt_skb = NULL;

        /* Imagine: the socket is IPv6, but an IPv4 packet arrives,
           goes to the IPv4 receive handler and is backlogged.
           From the backlog it always comes here. Kerboom...
           Fortunately, tcp_rcv_established and rcv_established
           handle them correctly, but that is not the case with
           tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_do_rcv(sk, skb);

        if (sk_filter(sk, skb, 0))
                goto discard;

        /*
         *      socket locking is here for SMP purposes as backlog rcv
         *      is currently called with bh processing disabled.
         */

1475         /* Do Stevens' IPV6_PKTOPTIONS.
1476 
1477            Yes, guys, this is the only place in our code where we
1478            can implement it without affecting IPv4.
1479            The rest of the code is protocol independent,
1480            and I do not like the idea of uglifying IPv4.
1481 
1482            Actually, the whole idea behind IPV6_PKTOPTIONS
1483            does not look very well thought out. For now we latch
1484            the options received in the last packet enqueued
1485            by TCP. Feel free to propose a better solution.
1486                                                --ANK (980728)
1487          */
1488         if (np->rxopt.all)
1489                 opt_skb = skb_clone(skb, GFP_ATOMIC);
1490 
1491         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1492                 TCP_CHECK_TIMER(sk);
1493                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1494                         goto reset;
1495                 TCP_CHECK_TIMER(sk);
1496                 if (opt_skb)
1497                         goto ipv6_pktoptions;
1498                 return 0;
1499         }
1500 
1501         if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
1502                 goto csum_err;
1503 
1504         if (sk->sk_state == TCP_LISTEN) { 
1505                 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1506                 if (!nsk)
1507                         goto discard;
1508 
1509                 /*
1510                  * Queue it on the new socket if the new socket is active,
1511                  * otherwise we just short-circuit this and continue with
1512                  * the new socket.
1513                  */
1514                 if(nsk != sk) {
1515                         if (tcp_child_process(sk, nsk, skb))
1516                                 goto reset;
1517                         if (opt_skb)
1518                                 __kfree_skb(opt_skb);
1519                         return 0;
1520                 }
1521         }
1522 
1523         TCP_CHECK_TIMER(sk);
1524         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1525                 goto reset;
1526         TCP_CHECK_TIMER(sk);
1527         if (opt_skb)
1528                 goto ipv6_pktoptions;
1529         return 0;
1530 
1531 reset:
1532         tcp_v6_send_reset(skb);
1533 discard:
1534         if (opt_skb)
1535                 __kfree_skb(opt_skb);
1536         kfree_skb(skb);
1537         return 0;
1538 csum_err:
1539         TCP_INC_STATS_BH(TcpInErrs);
1540         goto discard;
1541 
1542 
1543 ipv6_pktoptions:
1544         /* What do these checks establish?
1545 
1546            1. The skb was enqueued by TCP.
1547            2. The skb was added to the tail of the read queue, not out of order.
1548            3. The socket is not in a passive state.
1549            4. Finally, it really contains options the user wants to receive.
1550          */
1551         tp = tcp_sk(sk);
1552         if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1553             !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1554                 if (np->rxopt.bits.rxinfo)
1555                         np->mcast_oif = tcp_v6_iif(opt_skb);
1556                 if (np->rxopt.bits.rxhlim)
1557                         np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
1558                 if (ipv6_opt_accepted(sk, opt_skb)) {
1559                         skb_set_owner_r(opt_skb, sk);
1560                         opt_skb = xchg(&np->pktoptions, opt_skb);
1561                 } else {
1562                         __kfree_skb(opt_skb);
1563                         opt_skb = xchg(&np->pktoptions, NULL);
1564                 }
1565         }
1566 
1567         if (opt_skb)
1568                 kfree_skb(opt_skb);
1569         return 0;
1570 }
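
/* The ipv6_pktoptions tail above implements "latch the newest": the most
 * recent qualifying skb replaces whatever np->pktoptions held before, and
 * the displaced buffer is freed exactly once. A hedged sketch of the same
 * pattern, with C11 atomics standing in for the kernel's xchg() and
 * illustrative names throughout:
 */
#include <stdatomic.h>
#include <stdlib.h>

struct pktopts { int placeholder; };            /* stand-in for the skb */

static _Atomic(struct pktopts *) latched;       /* np->pktoptions        */

static void latch_newest(struct pktopts *fresh)
{
        /* Publish the new options and reclaim whatever they displaced.
         * Passing NULL clears the latch, mirroring the else-branch above. */
        struct pktopts *old = atomic_exchange(&latched, fresh);

        free(old);                              /* free(NULL) is a no-op */
}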
1571 
1572 static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
1573 {
1574         struct sk_buff *skb = *pskb;
1575         struct tcphdr *th;      
1576         struct sock *sk;
1577         int ret;
1578 
1579         if (skb->pkt_type != PACKET_HOST)
1580                 goto discard_it;
1581 
1582         /*
1583          *      Count it even if it's bad.
1584          */
1585         TCP_INC_STATS_BH(TcpInSegs);
1586 
1587         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1588                 goto discard_it;
1589 
1590         th = skb->h.th;
1591 
1592         if (th->doff < sizeof(struct tcphdr)/4)
1593                 goto bad_packet;
1594         if (!pskb_may_pull(skb, th->doff*4))
1595                 goto discard_it;
1596 
1597         if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1598              tcp_v6_checksum_init(skb) < 0))
1599                 goto bad_packet;
1600 
1601         th = skb->h.th;
1602         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1603         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1604                                     skb->len - th->doff*4);
1605         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1606         TCP_SKB_CB(skb)->when = 0;
1607         TCP_SKB_CB(skb)->flags = ip6_get_dsfield(skb->nh.ipv6h);
1608         TCP_SKB_CB(skb)->sacked = 0;
1609 
1610         sk = __tcp_v6_lookup(&skb->nh.ipv6h->saddr, th->source,
1611                              &skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
1612 
1613         if (!sk)
1614                 goto no_tcp_socket;
1615 
1616 process:
1617         if (sk->sk_state == TCP_TIME_WAIT)
1618                 goto do_time_wait;
1619 
1620         if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1621                 goto discard_and_relse;
1622 
1623         if (sk_filter(sk, skb, 0))
1624                 goto discard_and_relse;
1625 
1626         skb->dev = NULL;
1627 
1628         bh_lock_sock(sk);
1629         ret = 0;
1630         if (!sock_owned_by_user(sk)) {
1631                 if (!tcp_prequeue(sk, skb))
1632                         ret = tcp_v6_do_rcv(sk, skb);
1633         } else
1634                 sk_add_backlog(sk, skb);
1635         bh_unlock_sock(sk);
1636 
1637         sock_put(sk);
1638         return ret ? -1 : 0;
1639 
1640 no_tcp_socket:
1641         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1642                 goto discard_and_relse;
1643 
1644         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1645 bad_packet:
1646                 TCP_INC_STATS_BH(TcpInErrs);
1647         } else {
1648                 tcp_v6_send_reset(skb);
1649         }
1650 
1651 discard_it:
1652 
1653         /*
1654          *      Discard frame
1655          */
1656 
1657         kfree_skb(skb);
1658         return 0;
1659 
1660 discard_and_relse:
1661         sock_put(sk);
1662         goto discard_it;
1663 
1664 do_time_wait:
1665         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1666                 goto discard_and_relse;
1667 
1668         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1669                 TCP_INC_STATS_BH(TcpInErrs);
1670                 sock_put(sk);
1671                 goto discard_it;
1672         }
1673 
1674         switch(tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1675                                           skb, th, skb->len)) {
1676         case TCP_TW_SYN:
1677         {
1678                 struct sock *sk2;
1679 
1680                 sk2 = tcp_v6_lookup_listener(&skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
1681                 if (sk2 != NULL) {
1682                         tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1683                         tcp_tw_put((struct tcp_tw_bucket *)sk);
1684                         sk = sk2;
1685                         goto process;
1686                 }
1687                 /* Fall through to ACK */
1688         }
1689         case TCP_TW_ACK:
1690                 tcp_v6_timewait_ack(sk, skb);
1691                 break;
1692         case TCP_TW_RST:
1693                 goto no_tcp_socket;
1694         case TCP_TW_SUCCESS:;
1695         }
1696         goto discard_it;
1697 }
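
/* One subtlety in the TCP_SKB_CB setup above deserves a worked example:
 * SYN and FIN each consume one unit of sequence space, so end_seq adds
 * th->syn and th->fin on top of the payload length. A tiny user-space
 * illustration:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t seq = 1000;                 /* ntohl(th->seq)          */
        unsigned int syn = 1, fin = 0;       /* flags of a bare SYN     */
        uint32_t payload = 0;                /* skb->len - th->doff * 4 */

        uint32_t end_seq = seq + syn + fin + payload;
        printf("end_seq = %u\n", end_seq);   /* 1001: the SYN itself
                                                took one sequence number */
        return 0;
}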
1698 
1699 static int tcp_v6_rebuild_header(struct sock *sk)
1700 {
1701         int err;
1702         struct dst_entry *dst;
1703         struct ipv6_pinfo *np = inet6_sk(sk);
1704 
1705         dst = __sk_dst_check(sk, np->dst_cookie);
1706 
1707         if (dst == NULL) {
1708                 struct inet_opt *inet = inet_sk(sk);
1709                 struct flowi fl;
1710 
1711                 memset(&fl, 0, sizeof(fl));
1712                 fl.proto = IPPROTO_TCP;
1713                 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1714                 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
1715                 fl.fl6_flowlabel = np->flow_label;
1716                 fl.oif = sk->sk_bound_dev_if;
1717                 fl.fl_ip_dport = inet->dport;
1718                 fl.fl_ip_sport = inet->sport;
1719 
1720                 if (np->opt && np->opt->srcrt) {
1721                         struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
1722                         ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1723                 }
1724 
1725                 err = ip6_dst_lookup(sk, &dst, &fl);
1726 
1727                 if (err) {
1728                         sk->sk_route_caps = 0;
1729                         return err;
1730                 }
1731 
1732                 ip6_dst_store(sk, dst, NULL);
1733                 sk->sk_route_caps = dst->dev->features &
1734                         ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1735                 tcp_sk(sk)->ext2_header_len = dst->header_len;
1736         }
1737 
1738         return 0;
1739 }
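
/* tcp_v6_rebuild_header() is the slow path of a classic cached-route
 * pattern: __sk_dst_check() hands back the cached dst only while the
 * cookie says it is still valid, and a full flow lookup runs otherwise.
 * A minimal sketch of the pattern, with hypothetical names:
 */
struct route_cache {
        void *dst;               /* cached dst_entry, NULL if invalidated */
        unsigned int cookie;     /* np->dst_cookie in the code above      */
};

static void *cached_route(struct route_cache *rc, unsigned int want,
                          void *(*slow_lookup)(void))
{
        if (rc->dst && rc->cookie == want)
                return rc->dst;          /* fast path: cache still valid */

        rc->dst = slow_lookup();         /* slow path: rebuild the route */
        rc->cookie = want;
        return rc->dst;
}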
1740 
1741 static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok)
1742 {
1743         struct sock *sk = skb->sk;
1744         struct inet_opt *inet = inet_sk(sk);
1745         struct ipv6_pinfo *np = inet6_sk(sk);
1746         struct flowi fl;
1747         struct dst_entry *dst;
1748 
1749         memset(&fl, 0, sizeof(fl));
1750         fl.proto = IPPROTO_TCP;
1751         ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1752         ipv6_addr_copy(&fl.fl6_src, &np->saddr);
1753         fl.fl6_flowlabel = np->flow_label;
1754         IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
1755         fl.oif = sk->sk_bound_dev_if;
1756         fl.fl_ip_sport = inet->sport;
1757         fl.fl_ip_dport = inet->dport;
1758 
1759         if (np->opt && np->opt->srcrt) {
1760                 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
1761                 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1762         }
1763 
1764         dst = __sk_dst_check(sk, np->dst_cookie);
1765 
1766         if (dst == NULL) {
1767                 int err = ip6_dst_lookup(sk, &dst, &fl);
1768 
1769                 if (err) {
1770                         sk->sk_err_soft = -err;
1771                         return err;
1772                 }
1773 
1774                 ip6_dst_store(sk, dst, NULL);
1775                 sk->sk_route_caps = dst->dev->features &
1776                         ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1777                 tcp_sk(sk)->ext2_header_len = dst->header_len;
1778         }
1779 
1780         skb->dst = dst_clone(dst);
1781 
1782         /* Restore the final destination after routing is done */
1783         ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1784 
1785         return ip6_xmit(sk, skb, &fl, np->opt, 0);
1786 }
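
/* Both tcp_v6_rebuild_header() and tcp_v6_xmit() perform the same Type 0
 * routing-header dance: the route lookup is keyed on the FIRST segment of
 * the source route (rt0->addr), and the real destination is copied back
 * into the flow only after routing is done. A sketch with illustrative
 * names:
 */
#include <netinet/in.h>

struct flow6 { struct in6_addr dst; };

static void route_with_srcrt(struct flow6 *fl,
                             const struct in6_addr *final_dst,
                             const struct in6_addr *first_hop)
{
        fl->dst = *first_hop;    /* look up the route toward hop 1      */
        /* ... the equivalent of ip6_dst_lookup() would run here ...    */
        fl->dst = *final_dst;    /* restore before building the headers */
}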
1787 
1788 static void v6_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1789 {
1790         struct ipv6_pinfo *np = inet6_sk(sk);
1791         struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
1792 
1793         sin6->sin6_family = AF_INET6;
1794         ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
1795         sin6->sin6_port = inet_sk(sk)->dport;
1796         /* We do not store received flowlabel for TCP */
1797         sin6->sin6_flowinfo = 0;
1798         sin6->sin6_scope_id = 0;
1799         if (sk->sk_bound_dev_if &&
1800             ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
1801                 sin6->sin6_scope_id = sk->sk_bound_dev_if;
1802 }
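
/* v6_addr2sockaddr() sets sin6_scope_id only for link-local peers, because
 * a link-local address is ambiguous without an interface. The user-space
 * mirror image of that rule looks roughly like this ("eth0" is just an
 * example interface name):
 */
#include <string.h>
#include <netinet/in.h>
#include <net/if.h>

static void fill_sockaddr6(struct sockaddr_in6 *sin6,
                           const struct in6_addr *addr, in_port_t port)
{
        memset(sin6, 0, sizeof(*sin6));
        sin6->sin6_family = AF_INET6;
        sin6->sin6_addr = *addr;
        sin6->sin6_port = port;                   /* already network order */

        if (IN6_IS_ADDR_LINKLOCAL(addr))
                sin6->sin6_scope_id = if_nametoindex("eth0");
}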
1803 
1804 static int tcp_v6_remember_stamp(struct sock *sk)
1805 {
1806         /* Alas, not yet... */
1807         return 0;
1808 }
1809 
1810 static struct tcp_func ipv6_specific = {
1811         .queue_xmit     =       tcp_v6_xmit,
1812         .send_check     =       tcp_v6_send_check,
1813         .rebuild_header =       tcp_v6_rebuild_header,
1814         .conn_request   =       tcp_v6_conn_request,
1815         .syn_recv_sock  =       tcp_v6_syn_recv_sock,
1816         .remember_stamp =       tcp_v6_remember_stamp,
1817         .net_header_len =       sizeof(struct ipv6hdr),
1818 
1819         .setsockopt     =       ipv6_setsockopt,
1820         .getsockopt     =       ipv6_getsockopt,
1821         .addr2sockaddr  =       v6_addr2sockaddr,
1822         .sockaddr_len   =       sizeof(struct sockaddr_in6)
1823 };
1824 
1825 /*
1826  *      TCP over IPv4 via INET6 API
1827  */
1828 
1829 static struct tcp_func ipv6_mapped = {
1830         .queue_xmit     =       ip_queue_xmit,
1831         .send_check     =       tcp_v4_send_check,
1832         .rebuild_header =       tcp_v4_rebuild_header,
1833         .conn_request   =       tcp_v6_conn_request,
1834         .syn_recv_sock  =       tcp_v6_syn_recv_sock,
1835         .remember_stamp =       tcp_v4_remember_stamp,
1836         .net_header_len =       sizeof(struct iphdr),
1837 
1838         .setsockopt     =       ipv6_setsockopt,
1839         .getsockopt     =       ipv6_getsockopt,
1840         .addr2sockaddr  =       v6_addr2sockaddr,
1841         .sockaddr_len   =       sizeof(struct sockaddr_in6)
1842 };
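
/* The two tcp_func tables above form a per-address-family vtable: a
 * dual-stack socket talking to a v4-mapped peer swaps tp->af_specific from
 * ipv6_specific to ipv6_mapped, and every caller simply dispatches through
 * the pointer. The pattern, reduced to a sketch with hypothetical names:
 */
struct af_ops {
        int (*queue_xmit)(void *skb, int ipfragok);
        int net_header_len;
};

struct connection {
        const struct af_ops *af_specific;   /* swapped once at connect time */
};

static int send_segment(struct connection *c, void *skb)
{
        /* No family checks here: the table chose the behavior already. */
        return c->af_specific->queue_xmit(skb, 0);
}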
1843 
1844 
1845 
1846 /* NOTE: A lot of things are set to zero explicitly by the call to
1847  *       sk_alloc(), so they need not be done here.
1848  */
1849 static int tcp_v6_init_sock(struct sock *sk)
1850 {
1851         struct tcp_opt *tp = tcp_sk(sk);
1852 
1853         skb_queue_head_init(&tp->out_of_order_queue);
1854         tcp_init_xmit_timers(sk);
1855         tcp_prequeue_init(tp);
1856 
1857         tp->rto  = TCP_TIMEOUT_INIT;
1858         tp->mdev = TCP_TIMEOUT_INIT;
1859 
1860         /* So many TCP implementations out there (incorrectly) count the
1861          * initial SYN frame in their delayed-ACK and congestion control
1862          * algorithms that we must have the following bandaid to talk
1863          * efficiently to them.  -DaveM
1864          */
1865         tp->snd_cwnd = 2;
1866 
1867         /* See draft-stevens-tcpca-spec-01 for discussion of the
1868          * initialization of these values.
1869          */
1870         tp->snd_ssthresh = 0x7fffffff;
1871         tp->snd_cwnd_clamp = ~0;
1872         tp->mss_cache = 536;
1873 
1874         tp->reordering = sysctl_tcp_reordering;
1875 
1876         sk->sk_state = TCP_CLOSE;
1877 
1878         tp->af_specific = &ipv6_specific;
1879 
1880         sk->sk_write_space = tcp_write_space;
1881         sk->sk_use_write_queue = 1;
1882 
1883         sk->sk_sndbuf = sysctl_tcp_wmem[1];
1884         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1885 
1886         atomic_inc(&tcp_sockets_allocated);
1887 
1888         return 0;
1889 }
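
/* The defaults chosen in tcp_v6_init_sock() bound the first flight of
 * data: with the conservative pre-route MSS of 536 bytes and snd_cwnd = 2,
 * at most 2 * 536 = 1072 bytes may be outstanding before the first ACK
 * arrives. A one-line check of that arithmetic:
 */
#include <stdio.h>

int main(void)
{
        unsigned int mss_cache = 536, snd_cwnd = 2;

        printf("initial flight limit = %u bytes\n", snd_cwnd * mss_cache);
        return 0;                              /* prints 1072 */
}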
1890 
1891 static int tcp_v6_destroy_sock(struct sock *sk)
1892 {
1893         struct tcp_opt *tp = tcp_sk(sk);
1894         struct inet_opt *inet = inet_sk(sk);
1895 
1896         tcp_clear_xmit_timers(sk);
1897 
1898         /* Clean up the write buffer. */
1899         tcp_writequeue_purge(sk);
1900 
1901         /* Clean up our, hopefully empty, out_of_order_queue. */
1902         __skb_queue_purge(&tp->out_of_order_queue);
1903 
1904         /* Clean up the prequeue; it really should be empty. */
1905         __skb_queue_purge(&tp->ucopy.prequeue);
1906 
1907         /* Clean up a referenced TCP bind bucket. */
1908         if (tcp_sk(sk)->bind_hash)
1909                 tcp_put_port(sk);
1910 
1911         /* If sendmsg cached page exists, toss it. */
1912         if (inet->sndmsg_page != NULL)
1913                 __free_page(inet->sndmsg_page);
1914 
1915         atomic_dec(&tcp_sockets_allocated);
1916 
1917         return inet6_destroy_sock(sk);
1918 }
1919 
1920 /* Proc filesystem TCPv6 sock list dumping. */
1921 static void get_openreq6(struct seq_file *seq, 
1922                          struct sock *sk, struct open_request *req, int i, int uid)
1923 {
1924         struct in6_addr *dest, *src;
1925         int ttd = req->expires - jiffies;
1926 
1927         if (ttd < 0)
1928                 ttd = 0;
1929 
1930         src = &req->af.v6_req.loc_addr;
1931         dest = &req->af.v6_req.rmt_addr;
1932         seq_printf(seq,
1933                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1934                    "%02X %08X:%08X %02X:%08X %08X %5d %8d %d %d %p\n",
1935                    i,
1936                    src->s6_addr32[0], src->s6_addr32[1],
1937                    src->s6_addr32[2], src->s6_addr32[3],
1938                    ntohs(inet_sk(sk)->sport),
1939                    dest->s6_addr32[0], dest->s6_addr32[1],
1940                    dest->s6_addr32[2], dest->s6_addr32[3],
1941                    ntohs(req->rmt_port),
1942                    TCP_SYN_RECV,
1943                    0,0, /* could print option size, but that is af dependent. */
1944                    1,   /* timers active (only the expire timer) */  
1945                    jiffies_to_clock_t(ttd), 
1946                    req->retrans,
1947                    uid,
1948                    0,  /* non standard timer */  
1949                    0, /* open_requests have no inode */
1950                    0, req);
1951 }
1952 
1953 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1954 {
1955         struct in6_addr *dest, *src;
1956         __u16 destp, srcp;
1957         int timer_active;
1958         unsigned long timer_expires;
1959         struct inet_opt *inet = inet_sk(sp);
1960         struct tcp_opt *tp = tcp_sk(sp);
1961         struct ipv6_pinfo *np = inet6_sk(sp);
1962 
1963         dest  = &np->daddr;
1964         src   = &np->rcv_saddr;
1965         destp = ntohs(inet->dport);
1966         srcp  = ntohs(inet->sport);
1967         if (tp->pending == TCP_TIME_RETRANS) {
1968                 timer_active    = 1;
1969                 timer_expires   = tp->timeout;
1970         } else if (tp->pending == TCP_TIME_PROBE0) {
1971                 timer_active    = 4;
1972                 timer_expires   = tp->timeout;
1973         } else if (timer_pending(&sp->sk_timer)) {
1974                 timer_active    = 2;
1975                 timer_expires   = sp->sk_timer.expires;
1976         } else {
1977                 timer_active    = 0;
1978                 timer_expires = jiffies;
1979         }
1980 
1981         seq_printf(seq,
1982                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1983                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
1984                    i,
1985                    src->s6_addr32[0], src->s6_addr32[1],
1986                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1987                    dest->s6_addr32[0], dest->s6_addr32[1],
1988                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1989                    sp->sk_state, 
1990                    tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
1991                    timer_active,
1992                    jiffies_to_clock_t(timer_expires - jiffies),
1993                    tp->retransmits,
1994                    sock_i_uid(sp),
1995                    tp->probes_out,
1996                    sock_i_ino(sp),
1997                    atomic_read(&sp->sk_refcnt), sp,
1998                    tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong,
1999                    tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
2000                    );
2001 }
2002 
2003 static void get_timewait6_sock(struct seq_file *seq, 
2004                                struct tcp_tw_bucket *tw, int i)
2005 {
2006         struct in6_addr *dest, *src;
2007         __u16 destp, srcp;
2008         int ttd = tw->tw_ttd - jiffies;
2009 
2010         if (ttd < 0)
2011                 ttd = 0;
2012 
2013         dest  = &tw->tw_v6_daddr;
2014         src   = &tw->tw_v6_rcv_saddr;
2015         destp = ntohs(tw->tw_dport);
2016         srcp  = ntohs(tw->tw_sport);
2017 
2018         seq_printf(seq,
2019                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2020                    "%02X %08X:%08X %02X:%08X %08X %5d %8d %d %d %p\n",
2021                    i,
2022                    src->s6_addr32[0], src->s6_addr32[1],
2023                    src->s6_addr32[2], src->s6_addr32[3], srcp,
2024                    dest->s6_addr32[0], dest->s6_addr32[1],
2025                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
2026                    tw->tw_substate, 0, 0,
2027                    3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2028                    atomic_read(&tw->tw_refcnt), tw);
2029 }
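
/* All three dump helpers above print each in6_addr as four %08X words taken
 * straight from s6_addr32[], i.e. network-byte-order 32-bit chunks shown as
 * host-order integers. That is why /proc/net/tcp6 addresses look
 * byte-swapped on little-endian machines: ::1 renders its last word as
 * "01000000", not "00000001". A small user-space sketch reproducing the
 * format:
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

static void print_proc_style(const struct in6_addr *a)
{
        uint32_t w[4];

        memcpy(w, a->s6_addr, sizeof(w));   /* raw network-order bytes */
        printf("%08X%08X%08X%08X\n", w[0], w[1], w[2], w[3]);
}

int main(void)
{
        print_proc_style(&in6addr_loopback);   /* on a little-endian host:
                                                  ...0000000001000000    */
        return 0;
}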
2030 
2031 #ifdef CONFIG_PROC_FS
2032 static int tcp6_seq_show(struct seq_file *seq, void *v)
2033 {
2034         struct tcp_iter_state *st;
2035 
2036         if (v == SEQ_START_TOKEN) {
2037                 seq_printf(seq,
2038                            "  sl  "
2039                            "local_address                         "
2040                            "remote_address                        "
2041                            "st tx_queue rx_queue tr tm->when retrnsmt"
2042                            "   uid  timeout inode\n");
2043                 goto out;
2044         }
2045         st = seq->private;
2046 
2047         switch (st->state) {
2048         case TCP_SEQ_STATE_LISTENING:
2049         case TCP_SEQ_STATE_ESTABLISHED:
2050                 get_tcp6_sock(seq, v, st->num);
2051                 break;
2052         case TCP_SEQ_STATE_OPENREQ:
2053                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2054                 break;
2055         case TCP_SEQ_STATE_TIME_WAIT:
2056                 get_timewait6_sock(seq, v, st->num);
2057                 break;
2058         }
2059 out:
2060         return 0;
2061 }
2062 
2063 static struct file_operations tcp6_seq_fops;
2064 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2065         .owner          = THIS_MODULE,
2066         .name           = "tcp6",
2067         .family         = AF_INET6,
2068         .seq_show       = tcp6_seq_show,
2069         .seq_fops       = &tcp6_seq_fops,
2070 };
2071 
2072 int __init tcp6_proc_init(void)
2073 {
2074         return tcp_proc_register(&tcp6_seq_afinfo);
2075 }
2076 
2077 void tcp6_proc_exit(void)
2078 {
2079         tcp_proc_unregister(&tcp6_seq_afinfo);
2080 }
2081 #endif
2082 
2083 struct proto tcpv6_prot = {
2084         .name           =       "TCPv6",
2085         .close          =       tcp_close,
2086         .connect        =       tcp_v6_connect,
2087         .disconnect     =       tcp_disconnect,
2088         .accept         =       tcp_accept,
2089         .ioctl          =       tcp_ioctl,
2090         .init           =       tcp_v6_init_sock,
2091         .destroy        =       tcp_v6_destroy_sock,
2092         .shutdown       =       tcp_shutdown,
2093         .setsockopt     =       tcp_setsockopt,
2094         .getsockopt     =       tcp_getsockopt,
2095         .sendmsg        =       tcp_sendmsg,
2096         .recvmsg        =       tcp_recvmsg,
2097         .backlog_rcv    =       tcp_v6_do_rcv,
2098         .hash           =       tcp_v6_hash,
2099         .unhash         =       tcp_unhash,
2100         .get_port       =       tcp_v6_get_port,
2101 };
2102 
2103 static struct inet6_protocol tcpv6_protocol = {
2104         .handler        =       tcp_v6_rcv,
2105         .err_handler    =       tcp_v6_err,
2106         .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2107 };
2108 
2109 extern struct proto_ops inet6_stream_ops;
2110 
2111 static struct inet_protosw tcpv6_protosw = {
2112         .type           =       SOCK_STREAM,
2113         .protocol       =       IPPROTO_TCP,
2114         .prot           =       &tcpv6_prot,
2115         .ops            =       &inet6_stream_ops,
2116         .capability     =       -1,
2117         .no_check       =       0,
2118         .flags          =       INET_PROTOSW_PERMANENT,
2119 };
2120 
2121 void __init tcpv6_init(void)
2122 {
2123         /* register inet6 protocol */
2124         if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
2125                 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
2126         inet6_register_protosw(&tcpv6_protosw);
2127 }
2128 
