~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/netfilter/nf_conntrack_expect.c

Version: ~ [ linux-5.12-rc7 ] ~ [ linux-5.11.13 ] ~ [ linux-5.10.29 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.111 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.186 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.230 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.266 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.266 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.18.140 ] ~ [ linux-3.16.85 ] ~ [ linux-3.14.79 ] ~ [ linux-3.12.74 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* Expectation handling for nf_conntrack. */
  2 
  3 /* (C) 1999-2001 Paul `Rusty' Russell
  4  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
  5  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
  6  * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
  7  *
  8  * This program is free software; you can redistribute it and/or modify
  9  * it under the terms of the GNU General Public License version 2 as
 10  * published by the Free Software Foundation.
 11  */
 12 
 13 #include <linux/types.h>
 14 #include <linux/netfilter.h>
 15 #include <linux/skbuff.h>
 16 #include <linux/proc_fs.h>
 17 #include <linux/seq_file.h>
 18 #include <linux/stddef.h>
 19 #include <linux/slab.h>
 20 #include <linux/err.h>
 21 #include <linux/percpu.h>
 22 #include <linux/kernel.h>
 23 #include <linux/jhash.h>
 24 #include <linux/moduleparam.h>
 25 #include <linux/export.h>
 26 #include <net/net_namespace.h>
 27 #include <net/netns/hash.h>
 28 
 29 #include <net/netfilter/nf_conntrack.h>
 30 #include <net/netfilter/nf_conntrack_core.h>
 31 #include <net/netfilter/nf_conntrack_expect.h>
 32 #include <net/netfilter/nf_conntrack_helper.h>
 33 #include <net/netfilter/nf_conntrack_tuple.h>
 34 #include <net/netfilter/nf_conntrack_zones.h>
 35 
/* Number of buckets in the expectation hash table (boot/module parameter). */
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

/* Global (all-namespaces) expectation hash table. */
struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

/* Per-netns upper bound on pending expectations (default hsize * 4). */
unsigned int nf_ct_expect_max __read_mostly;

/* Slab cache for struct nf_conntrack_expect. */
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
/* Random hash seed, lazily initialized on first hash computation. */
static unsigned int nf_ct_expect_hashrnd __read_mostly;
 46 
 47 /* nf_conntrack_expect helper functions */
/* Unlink @exp from the global expectation hash and from its master
 * conntrack's per-helper list, report IPEXP_DESTROY to @portid, and drop
 * the hash-table reference.  The expectation timer must no longer be
 * pending; caller serializes via nf_conntrack_expect_lock.
 */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	/* remove from the global hash ... */
	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	/* ... and from the master's expectation list */
	hlist_del_rcu(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);	/* drop the hash-table reference */

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
 69 
/* Timer callback: the expectation expired without being fulfilled.
 * Unlink it under the expect lock, then drop the timer's reference.
 */
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_put(exp);	/* timer reference */
}
 79 
 80 static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
 81 {
 82         unsigned int hash, seed;
 83 
 84         get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));
 85 
 86         seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);
 87 
 88         hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
 89                       (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
 90                        (__force __u16)tuple->dst.u.all) ^ seed);
 91 
 92         return reciprocal_scale(hash, nf_ct_expect_hsize);
 93 }
 94 
 95 static bool
 96 nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
 97                 const struct nf_conntrack_expect *i,
 98                 const struct nf_conntrack_zone *zone,
 99                 const struct net *net)
100 {
101         return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
102                net_eq(net, nf_ct_net(i->master)) &&
103                nf_ct_zone_equal_any(i->master, zone);
104 }
105 
106 bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
107 {
108         if (del_timer(&exp->timeout)) {
109                 nf_ct_unlink_expect(exp);
110                 nf_ct_expect_put(exp);
111                 return true;
112         }
113         return false;
114 }
115 EXPORT_SYMBOL_GPL(nf_ct_remove_expect);
116 
117 struct nf_conntrack_expect *
118 __nf_ct_expect_find(struct net *net,
119                     const struct nf_conntrack_zone *zone,
120                     const struct nf_conntrack_tuple *tuple)
121 {
122         struct nf_conntrack_expect *i;
123         unsigned int h;
124 
125         if (!net->ct.expect_count)
126                 return NULL;
127 
128         h = nf_ct_expect_dst_hash(net, tuple);
129         hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
130                 if (nf_ct_exp_equal(tuple, i, zone, net))
131                         return i;
132         }
133         return NULL;
134 }
135 EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
136 
/* Find an expectation matching @tuple and take a reference on it.
 * Returns NULL if none is found or if the matching entry is already on
 * its way to being freed (refcount already dropped to zero under RCU).
 */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
		      const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	/* The entry may be freed concurrently; only hand it out if we can
	 * still take a reference.
	 */
	if (i && !refcount_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
154 
/* If an expectation for this connection is found, it gets deleted from
 * the global table then returned; PERMANENT expectations are returned
 * with an extra reference but stay in place.  On success, a reference to
 * the master conntrack has also been taken.
 * NOTE(review): the plain (non-RCU) hash walk implies the caller
 * serializes against removals (nf_conntrack_expect_lock) -- confirm at
 * call sites.
 */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
		       const struct nf_conntrack_zone *zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
		/* skip entries parked as INACTIVE (e.g. by userspace) */
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_exp_equal(tuple, i, zone, net)) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid race with other CPUs, that for exp->master ct, is
	 * about to invoke ->destroy(), or nf_ct_delete() via timeout
	 * or early_drop().
	 *
	 * The atomic_inc_not_zero() check tells:  If that fails, we
	 * know that the ct is being destroyed.  If it succeeds, we
	 * can be sure the ct cannot disappear underneath.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		refcount_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo exp->master refcnt increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}
211 
212 /* delete all expectations for this conntrack */
213 void nf_ct_remove_expectations(struct nf_conn *ct)
214 {
215         struct nf_conn_help *help = nfct_help(ct);
216         struct nf_conntrack_expect *exp;
217         struct hlist_node *next;
218 
219         /* Optimization: most connection never expect any others. */
220         if (!help)
221                 return;
222 
223         spin_lock_bh(&nf_conntrack_expect_lock);
224         hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
225                 nf_ct_remove_expect(exp);
226         }
227         spin_unlock_bh(&nf_conntrack_expect_lock);
228 }
229 EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
230 
231 /* Would two expected things clash? */
232 static inline int expect_clash(const struct nf_conntrack_expect *a,
233                                const struct nf_conntrack_expect *b)
234 {
235         /* Part covered by intersection of masks must be unequal,
236            otherwise they clash */
237         struct nf_conntrack_tuple_mask intersect_mask;
238         int count;
239 
240         intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
241 
242         for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
243                 intersect_mask.src.u3.all[count] =
244                         a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
245         }
246 
247         return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
248                net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
249                nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
250 }
251 
252 static inline int expect_matches(const struct nf_conntrack_expect *a,
253                                  const struct nf_conntrack_expect *b)
254 {
255         return a->master == b->master && a->class == b->class &&
256                nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
257                nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
258                net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
259                nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
260 }
261 
/* Generally a bad idea to call this: could have matched already.
 * Removes @exp (if still pending) under the expectation lock.
 */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_remove_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
270 
271 /* We don't increase the master conntrack refcount for non-fulfilled
272  * conntracks. During the conntrack destruction, the expectations are
273  * always killed before the conntrack itself */
274 struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
275 {
276         struct nf_conntrack_expect *new;
277 
278         new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
279         if (!new)
280                 return NULL;
281 
282         new->master = me;
283         refcount_set(&new->use, 1);
284         return new;
285 }
286 EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
287 
/* Fill in @exp's tuple and mask for @family/@proto.  A NULL @saddr or
 * @src acts as a wildcard (zero value and zero mask); @daddr and @dst
 * are mandatory.  The unused tail of the address union is zeroed so
 * nf_ct_tuple_equal() can compare the full union.
 */
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;	/* address length: 4 for IPv4, 16 otherwise */

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		/* wildcard source address: zero value and zero mask */
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		/* wildcard source port/id */
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
345 
346 static void nf_ct_expect_free_rcu(struct rcu_head *head)
347 {
348         struct nf_conntrack_expect *exp;
349 
350         exp = container_of(head, struct nf_conntrack_expect, rcu);
351         kmem_cache_free(nf_ct_expect_cachep, exp);
352 }
353 
/* Drop one reference to @exp; the last put frees it after an RCU grace
 * period so concurrent lockless readers can finish first.
 */
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (refcount_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
360 
/* Link @exp into its master's helper list and the global hash, then arm
 * its timeout timer.  Caller holds nf_conntrack_expect_lock (see the
 * rcu_dereference_protected() below).
 * NOTE(review): if the master has no helper attached, timeout.expires is
 * never assigned before add_timer() -- callers appear to guarantee a
 * helper is present; confirm.
 */
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

	/* two references : one for hash insert, one for the timer */
	refcount_add(2, &exp->use);

	hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		/* class-specific timeout from the helper's expect policy */
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
}
389 
390 /* Race with expectations being used means we could have none to find; OK. */
391 static void evict_oldest_expect(struct nf_conn *master,
392                                 struct nf_conntrack_expect *new)
393 {
394         struct nf_conn_help *master_help = nfct_help(master);
395         struct nf_conntrack_expect *exp, *last = NULL;
396 
397         hlist_for_each_entry(exp, &master_help->expectations, lnode) {
398                 if (exp->class == new->class)
399                         last = exp;
400         }
401 
402         if (last)
403                 nf_ct_remove_expect(last);
404 }
405 
/* Decide whether @expect may be inserted.  An identical pending
 * expectation is removed and replaced; a clashing one yields -EBUSY;
 * helper per-class quotas are enforced (evicting the oldest same-class
 * entry first) and the global per-netns limit is checked last.  Returns
 * 0 when insertion may proceed.  Caller holds nf_conntrack_expect_lock.
 */
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 0;

	/* master lost its helper extension: nothing to attach to */
	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(net, &expect->tuple);
	hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			/* identical expectation already queued: replace it */
			if (nf_ct_remove_expect(i))
				break;
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			/* eviction can fail (timer race); re-check quota */
			if (master_help->expecting[expect->class]
						>= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}
455 
456 int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
457                                 u32 portid, int report)
458 {
459         int ret;
460 
461         spin_lock_bh(&nf_conntrack_expect_lock);
462         ret = __nf_ct_expect_check(expect);
463         if (ret < 0)
464                 goto out;
465 
466         nf_ct_expect_insert(expect);
467 
468         spin_unlock_bh(&nf_conntrack_expect_lock);
469         nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
470         return 0;
471 out:
472         spin_unlock_bh(&nf_conntrack_expect_lock);
473         return ret;
474 }
475 EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
476 
477 #ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Iterator state for /proc/net/nf_conntrack_expect: the seq_file
 * per-netns private header plus the current hash bucket index.
 */
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};
482 
483 static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
484 {
485         struct ct_expect_iter_state *st = seq->private;
486         struct hlist_node *n;
487 
488         for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
489                 n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
490                 if (n)
491                         return n;
492         }
493         return NULL;
494 }
495 
496 static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
497                                              struct hlist_node *head)
498 {
499         struct ct_expect_iter_state *st = seq->private;
500 
501         head = rcu_dereference(hlist_next_rcu(head));
502         while (head == NULL) {
503                 if (++st->bucket >= nf_ct_expect_hsize)
504                         return NULL;
505                 head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
506         }
507         return head;
508 }
509 
510 static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
511 {
512         struct hlist_node *head = ct_expect_get_first(seq);
513 
514         if (head)
515                 while (pos && (head = ct_expect_get_next(seq, head)))
516                         pos--;
517         return pos ? NULL : head;
518 }
519 
/* seq_file start: take the RCU read lock (released in exp_seq_stop())
 * and position the iterator at *pos.
 */
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}
526 
/* seq_file next: bump the position and return the following entry. */
static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}
532 
/* seq_file stop: drop the RCU read lock taken in exp_seq_start(). */
static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
538 
/* Emit one /proc line per expectation: remaining timeout, protocol
 * numbers, the expected tuple, flag names, and helper/policy names.
 */
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	/* remaining lifetime in seconds, or "-" if no timer was set up */
	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_puts(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
				       expect->tuple.dst.protonum));

	/* flags printed comma-separated via the running delimiter */
	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_puts(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name[0])
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	seq_putc(s, '\n');

	return 0;
}
584 
/* seq_file iteration callbacks for /proc/net/nf_conntrack_expect */
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};
591 
/* proc open: per-netns seq_file with ct_expect_iter_state private data. */
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			sizeof(struct ct_expect_iter_state));
}
597 
/* file_operations for /proc/net/nf_conntrack_expect */
static const struct file_operations exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
605 #endif /* CONFIG_NF_CONNTRACK_PROCFS */
606 
/* Create /proc/net/nf_conntrack_expect (mode 0440) for @net and, when
 * the namespace's root uid/gid map to valid ids, chown the entry so it
 * is readable by root inside that user namespace.  Compiles to a no-op
 * without CONFIG_NF_CONNTRACK_PROCFS.
 */
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;

	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}
626 
/* Remove the per-netns proc entry created by exp_proc_init(). */
static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}
633 
/* Hash table size is settable only at boot/module load (mode 0400). */
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
635 
/* Per-netns setup: zero the expectation counter and create the proc file. */
int nf_conntrack_expect_pernet_init(struct net *net)
{
	net->ct.expect_count = 0;
	return exp_proc_init(net);
}
641 
/* Per-netns teardown counterpart of nf_conntrack_expect_pernet_init(). */
void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
}
646 
647 int nf_conntrack_expect_init(void)
648 {
649         if (!nf_ct_expect_hsize) {
650                 nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
651                 if (!nf_ct_expect_hsize)
652                         nf_ct_expect_hsize = 1;
653         }
654         nf_ct_expect_max = nf_ct_expect_hsize * 4;
655         nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
656                                 sizeof(struct nf_conntrack_expect),
657                                 0, 0, NULL);
658         if (!nf_ct_expect_cachep)
659                 return -ENOMEM;
660 
661         nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
662         if (!nf_ct_expect_hash) {
663                 kmem_cache_destroy(nf_ct_expect_cachep);
664                 return -ENOMEM;
665         }
666 
667         return 0;
668 }
669 
/* Subsystem teardown: wait for in-flight RCU frees, then release the
 * slab cache and the hash table.
 */
void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
	nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
}
676 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp