TOMOYO Linux Cross Reference
Linux/net/ipv4/inet_timewait_sock.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic TIME_WAIT socket functions
 *
 *              From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/**
 *      inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 *      @tw: timewait socket
 *      @hashinfo: hashinfo pointer
 *
 *      Unhash a timewait socket from the bind hash, if hashed.
 *      The bind hash lock must be held by the caller.
 *      The caller should call inet_twsk_put() after releasing the lock.
 */
void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
                          struct inet_hashinfo *hashinfo)
{
        struct inet_bind_bucket *tb = tw->tw_tb;

        if (!tb)
                return;

        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        __sock_put((struct sock *)tw);
}
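
An aside on the last two statements: removing the timewait socket from the bucket's owner list may leave the bind bucket empty, in which case inet_bind_bucket_destroy() frees it, and __sock_put() drops the reference that the bind-hash chain held on the timewait socket. The following is a minimal userspace sketch of the "free the bucket once its owner list empties" idea; the structs and helpers are invented for illustration and are not kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for inet_bind_bucket and its owners list. */
struct owner {
        struct owner *next;
};

struct bind_bucket {
        unsigned short port;
        struct owner *owners;           /* sockets bound to this port */
};

/* Remove one owner; free the bucket once nothing is bound to the port. */
static void bucket_unhash_owner(struct bind_bucket **bucketp, struct owner *o)
{
        struct bind_bucket *b = *bucketp;
        struct owner **pp;

        for (pp = &b->owners; *pp; pp = &(*pp)->next) {
                if (*pp == o) {
                        *pp = o->next;
                        break;
                }
        }
        if (!b->owners) {               /* analogue of inet_bind_bucket_destroy() */
                printf("port %u bucket destroyed\n", b->port);
                free(b);
                *bucketp = NULL;
        }
}

int main(void)
{
        struct bind_bucket *b = calloc(1, sizeof(*b));
        struct owner tw = { 0 };

        b->port = 8080;
        b->owners = &tw;
        bucket_unhash_owner(&b, &tw);   /* last owner gone, bucket is freed */
        return 0;
}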

/* Must be called with locally disabled BHs. */
static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
        struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
        spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
        struct inet_bind_hashbucket *bhead;

        spin_lock(lock);
        sk_nulls_del_node_init_rcu((struct sock *)tw);
        spin_unlock(lock);

        /* Disassociate from bind bucket. */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                        hashinfo->bhash_size)];

        spin_lock(&bhead->lock);
        inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead->lock);

        atomic_dec(&tw->tw_dr->tw_count);
        inet_twsk_put(tw);
}
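
For orientation, inet_bhashfn() picks the bind-hash bucket from the local port: in this tree it is roughly (lport + net_hash_mix(net)) & (bhash_size - 1), i.e. the port plus a per-namespace salt, masked by the power-of-two table size. The snippet below is an illustrative userspace re-creation of that index computation; "netns_salt" is a stand-in for net_hash_mix() and the numbers are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bind-hash bucket index: port plus a per-namespace salt,
 * masked by a power-of-two table size. Not the kernel's inet_bhashfn(),
 * just the same shape of computation. */
static uint32_t bhash_index(uint16_t lport, uint32_t netns_salt, uint32_t size)
{
        return (lport + netns_salt) & (size - 1);       /* size must be a power of two */
}

int main(void)
{
        uint32_t size = 65536;          /* assumed power-of-two bhash_size */

        printf("port 8080 -> bucket %u\n", bhash_index(8080, 0x5eed1234, size));
        printf("port  443 -> bucket %u\n", bhash_index(443, 0x5eed1234, size));
        return 0;
}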

void inet_twsk_free(struct inet_timewait_sock *tw)
{
        struct module *owner = tw->tw_prot->owner;
        twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
        pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
        kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
        module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
        if (refcount_dec_and_test(&tw->tw_refcnt))
                inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
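
inet_twsk_put() is the classic drop-one-reference helper: the object is freed only when the count reaches zero, and every holder (the ehash chain, the bhash chain, the timer, temporary lookups) is expected to call it exactly once per reference it took. Here is a minimal userspace sketch of the same pattern with C11 atomics; the struct and function names are invented.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object, mirroring the tw_refcnt usage. */
struct obj {
        atomic_int refcnt;
};

static void obj_put(struct obj *o)
{
        /* Free only when the last reference goes away, as inet_twsk_put() does. */
        if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
                printf("last reference dropped, freeing\n");
                free(o);
        }
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcnt, 2);     /* e.g. one ref for a hash chain, one for a timer */
        obj_put(o);                     /* still referenced elsewhere */
        obj_put(o);                     /* frees the object */
        return 0;
}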

static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
                                   struct hlist_nulls_head *list)
{
        hlist_nulls_add_head_rcu(&tw->tw_node, list);
}

static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
                                    struct hlist_head *list)
{
        hlist_add_head(&tw->tw_bind_node, list);
}
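
Note the asymmetry between the two helpers: the established-hash (ehash) chain is an RCU-protected "nulls" list, while the bind chain is a plain hlist that is only ever touched under the bucket spinlock. In a nulls list the terminator is not NULL but an encoded value (by kernel convention, value << 1 | 1, typically the bucket index), so a lockless reader that raced with an entry being moved can detect that it finished on the wrong chain and restart; inet_twsk_purge() below relies on exactly that via get_nulls_value(). A toy illustration of the encoding, with invented helpers:

#include <stdint.h>
#include <stdio.h>

/* Toy version of the "nulls" terminator idea used by the ehash chains. */
static inline void *make_nulls(uint32_t value)
{
        return (void *)(uintptr_t)(((uintptr_t)value << 1) | 1);
}

static inline int is_nulls(const void *p)
{
        return (uintptr_t)p & 1;
}

static inline uint32_t nulls_value(const void *p)
{
        return (uint32_t)((uintptr_t)p >> 1);
}

int main(void)
{
        void *end = make_nulls(42);     /* terminator of bucket 42 */

        if (is_nulls(end) && nulls_value(end) != 7)
                printf("ended on bucket %u, expected 7: restart the lookup\n",
                       nulls_value(end));
        return 0;
}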

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
        struct inet_bind_hashbucket *bhead;
        /* Step 1: Put TW into bind hash. Original socket stays there too.
           Note that any socket with inet->num != 0 MUST be bound in
           binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
        spin_unlock(&bhead->lock);

        spin_lock(lock);

        /* Step 2: Hash TW into the ehash chain. */
        inet_twsk_add_node_rcu(tw, &ehead->chain);

        /* Step 3: Remove SK from hash chain */
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

        spin_unlock(lock);

        /* tw_refcnt is set to 3 because we have:
         * - one reference for bhash chain.
         * - one reference for ehash chain.
         * - one reference for timer.
         * We can use atomic_set() because prior spin_lock()/spin_unlock()
         * committed into memory all tw fields.
         * Also note that after this point we have lost our implicit reference,
         * so we are not allowed to use tw anymore.
         */
        refcount_set(&tw->tw_refcnt, 3);
}
EXPORT_SYMBOL_GPL(inet_twsk_hashdance);
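
The refcount choreography here pairs with inet_twsk_alloc() below: the freshly allocated timewait socket keeps tw_refcnt at zero, so an RCU lookup that finds it in the ehash chain before the hashdance finishes will fail refcount_inc_not_zero() and ignore it; only once everything is linked does refcount_set() publish the three references in one step. A minimal single-threaded sketch of this "initialize fully, then publish the refcount" pattern, with invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct tw_like {
        int daddr;              /* a field a lookup would compare */
        atomic_int refcnt;      /* 0 means "not published yet" */
};

/* Analogue of refcount_inc_not_zero(): take a reference only if one exists. */
static bool get_unless_zero(struct tw_like *t)
{
        int old = atomic_load(&t->refcnt);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&t->refcnt, &old, old + 1))
                        return true;
        }
        return false;
}

int main(void)
{
        struct tw_like tw = { .daddr = 0, .refcnt = 0 };

        /* A racing lookup at this point must skip the half-built object. */
        printf("before publish: usable=%d\n", get_unless_zero(&tw));

        tw.daddr = 0x7f000001;          /* finish initialization ...      */
        atomic_store(&tw.refcnt, 3);    /* ... then publish the references */
        printf("after publish:  usable=%d\n", get_unless_zero(&tw));
        return 0;
}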

static void tw_timer_handler(struct timer_list *t)
{
        struct inet_timewait_sock *tw = from_timer(tw, t, tw_timer);

        if (tw->tw_kill)
                __NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
        else
                __NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITED);
        inet_twsk_kill(tw);
}
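
from_timer() is a container_of() wrapper: given the expiring timer_list it recovers the inet_timewait_sock that embeds it, with no extra pointer stored in the timer. A standalone sketch of the same pointer arithmetic; the struct names here are invented for the example.

#include <stddef.h>
#include <stdio.h>

/* container_of(): recover the enclosing struct from a pointer to a member. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct timer {
        long expires;
};

struct tw_like {
        int state;
        struct timer tw_timer;          /* embedded, like tw->tw_timer */
};

static void handler(struct timer *t)
{
        /* Equivalent of from_timer(tw, t, tw_timer). */
        struct tw_like *tw = container_of(t, struct tw_like, tw_timer);

        printf("timer fired for object in state %d\n", tw->state);
}

int main(void)
{
        struct tw_like tw = { .state = 6 };     /* 6 is TCP_TIME_WAIT in the kernel enum */

        handler(&tw.tw_timer);
        return 0;
}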

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
                                           struct inet_timewait_death_row *dr,
                                           const int state)
{
        struct inet_timewait_sock *tw;

        if (atomic_read(&dr->tw_count) >= dr->sysctl_max_tw_buckets)
                return NULL;

        tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                              GFP_ATOMIC);
        if (tw) {
                const struct inet_sock *inet = inet_sk(sk);

                tw->tw_dr           = dr;
                /* Give us an identity. */
                tw->tw_daddr        = inet->inet_daddr;
                tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
                tw->tw_bound_dev_if = sk->sk_bound_dev_if;
                tw->tw_tos          = inet->tos;
                tw->tw_num          = inet->inet_num;
                tw->tw_state        = TCP_TIME_WAIT;
                tw->tw_substate     = state;
                tw->tw_sport        = inet->inet_sport;
                tw->tw_dport        = inet->inet_dport;
                tw->tw_family       = sk->sk_family;
                tw->tw_reuse        = sk->sk_reuse;
                tw->tw_reuseport    = sk->sk_reuseport;
                tw->tw_hash         = sk->sk_hash;
                tw->tw_ipv6only     = 0;
                tw->tw_transparent  = inet->transparent;
                tw->tw_prot         = sk->sk_prot_creator;
                atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
                twsk_net_set(tw, sock_net(sk));
                timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
                /*
                 * Because we use RCU lookups, we should not set tw_refcnt
                 * to a nonzero value before everything is set up for this
                 * timewait socket.
                 */
                refcount_set(&tw->tw_refcnt, 0);

                __module_get(tw->tw_prot->owner);
        }

        return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);
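
The admission check at the top enforces a global cap: dr->tw_count tracks how many timewait sockets exist on this death row, and once it reaches dr->sysctl_max_tw_buckets (for TCP, the net.ipv4.tcp_max_tw_buckets sysctl) the allocation is refused and the closing connection simply skips TIME_WAIT. Note that the counter itself is only incremented later, when the timer is armed in __inet_twsk_schedule(), and decremented in inet_twsk_kill(). A compact sketch of the capped-allocation idea, with the counter folded into the allocator for brevity; all names are invented.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative cap on concurrently allocated objects, like tw_count vs.
 * sysctl_max_tw_buckets: past the limit the caller just does without. */
static atomic_int tw_count;
static const int max_tw_buckets = 2;

static void *tw_alloc(void)
{
        if (atomic_load(&tw_count) >= max_tw_buckets)
                return NULL;                    /* over the cap: skip TIME_WAIT */
        atomic_fetch_add(&tw_count, 1);
        return malloc(16);
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("alloc %d -> %s\n", i, tw_alloc() ? "ok" : "refused");
        return 0;
}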

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets.
 * Warning: consumes a reference.
 * The caller should not access tw anymore.
 */
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
        if (del_timer_sync(&tw->tw_timer))
                inet_twsk_kill(tw);
        inet_twsk_put(tw);
}
EXPORT_SYMBOL(inet_twsk_deschedule_put);
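
The teardown relies on del_timer_sync() reporting whether it actually deactivated a pending timer: if it did, this path runs inet_twsk_kill() itself; if the timer already fired, the handler has done (or is doing) the kill and this caller only drops its own reference. The effect is that the unhash-and-free work runs exactly once. The sketch below illustrates that "whoever cancels the pending work does the teardown" idiom with an atomic flag standing in for the timer's pending state; it is an analogy, not the timer API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool work_pending = true;         /* stand-in for a pending timer */

static bool cancel_work(void)
{
        bool expected = true;

        /* Succeeds only for whoever clears the pending flag first. */
        return atomic_compare_exchange_strong(&work_pending, &expected, false);
}

static void kill_once(const char *who)
{
        if (cancel_work())
                printf("%s performed the teardown\n", who);
        else
                printf("%s: teardown already done, just dropping a reference\n", who);
}

int main(void)
{
        kill_once("early deschedule");          /* wins, does the teardown */
        kill_once("timer handler");             /* finds nothing left to do */
        return 0;
}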

void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
{
        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost. If N subsequent retransmitted
         * FINs (or previous segments) are lost (the probability of such an
         * event is p^(N+1), where p is the probability of losing a single
         * packet, and the time to detect the loss is about RTO*(2^N - 1)
         * with exponential backoff). Normal timewait length is calculated
         * so that we wait at least for one retransmitted FIN (maximal RTO
         * is 120sec).
         * [ BTW Linux, following BSD, violates this requirement, waiting
         *   only for 60sec; we should wait at least 240 secs.
         *   Well, 240 consumes too much of resources 8)
         * ]
         * This interval is not reduced to catch old duplicates and
         * responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect
         * old duplicates, we can reduce the interval to bounds required
         * by RTO, rather than MSL. So, if the peer understands PAWS, we
         * kill the tw bucket after 3.5*RTO (it is important that this number
         * is greater than the TS tick!) and detect old duplicates with the
         * help of PAWS.
         */

        tw->tw_kill = timeo <= 4*HZ;
        if (!rearm) {
                BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
                atomic_inc(&tw->tw_dr->tw_count);
        } else {
                mod_timer_pending(&tw->tw_timer, jiffies + timeo);
        }
}
EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
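
A worked example of the numbers in the comment above may help. tw_kill only selects which MIB counter the timer handler bumps (TIMEWAITKILLED for the short, RTO-derived timeouts versus TIMEWAITED for the full period). Assuming HZ = 1000 and a 200 ms retransmission timeout, 3.5 * RTO is 700 jiffies, well under the 4*HZ threshold, while the normal 60-second TIME_WAIT period is far above it. The RTO value and HZ are assumptions made only for this illustration.

#include <stdio.h>

#define HZ              1000            /* assumed jiffies per second */
#define TCP_TIMEWAIT_LEN (60 * HZ)      /* the usual 60 s TIME_WAIT period */

static void classify(const char *what, int timeo)
{
        printf("%-22s timeo = %5d jiffies (%.1f s), tw_kill = %d\n",
               what, timeo, (double)timeo / HZ, timeo <= 4 * HZ);
}

int main(void)
{
        int rto = 200 * HZ / 1000;      /* an illustrative 200 ms RTO */

        classify("3.5 * RTO (PAWS peer)", rto * 7 / 2);
        classify("TCP_TIMEWAIT_LEN", TCP_TIMEWAIT_LEN);
        return 0;
}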

void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
{
        struct inet_timewait_sock *tw;
        struct sock *sk;
        struct hlist_nulls_node *node;
        unsigned int slot;

        for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
                cond_resched();
                rcu_read_lock();
restart:
                sk_nulls_for_each_rcu(sk, node, &head->chain) {
                        if (sk->sk_state != TCP_TIME_WAIT)
                                continue;
                        tw = inet_twsk(sk);
                        if ((tw->tw_family != family) ||
                                refcount_read(&twsk_net(tw)->ns.count))
                                continue;

                        if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
                                continue;

                        if (unlikely((tw->tw_family != family) ||
                                     refcount_read(&twsk_net(tw)->ns.count))) {
                                inet_twsk_put(tw);
                                goto restart;
                        }

                        rcu_read_unlock();
                        local_bh_disable();
                        inet_twsk_deschedule_put(tw);
                        local_bh_enable();
                        goto restart_rcu;
                }
                /* If the nulls value we got at the end of this lookup is
                 * not the expected one, we must restart lookup.
                 * We probably met an item that was moved to another chain.
                 */
                if (get_nulls_value(node) != slot)
                        goto restart;
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);
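
The body of the scan above follows a take-then-recheck pattern worth calling out: a cheap filter is applied first without any reference held, then refcount_inc_not_zero() pins the entry (it can fail if the socket is already being freed), and then the filter is applied again because the slot may have been recycled for a different connection or namespace in between; only after that does the code leave the RCU section to do the blocking teardown. A condensed, single-threaded sketch of that ordering; every name below is invented for the illustration.

#include <stdbool.h>
#include <stdio.h>

struct entry {
        int family;
        int refcnt;
};

static bool get_unless_zero(struct entry *e)
{
        if (e->refcnt == 0)
                return false;           /* already being freed */
        e->refcnt++;
        return true;
}

static void put_entry(struct entry *e)
{
        e->refcnt--;
}

static bool try_purge(struct entry *e, int family)
{
        if (e->family != family)        /* step 1: cheap filter, no reference */
                return false;
        if (!get_unless_zero(e))        /* step 2: pin the entry */
                return false;
        if (e->family != family) {      /* step 3: re-validate after pinning */
                put_entry(e);
                return false;
        }
        printf("purging entry (refcnt now %d)\n", e->refcnt);
        put_entry(e);                   /* step 4: the teardown consumes the reference */
        return true;
}

int main(void)
{
        struct entry e = { .family = 2 /* AF_INET */, .refcnt = 1 };

        try_purge(&e, 2);
        return 0;
}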