/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

#include "nf_internals.h"

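/* Per-bucket locks serializing insertion into and removal from the
 * nf_nat_bysource chains below; a bucket's lock is picked by taking
 * the source hash modulo CONNTRACK_LOCKS.
 */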
static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
                                                __read_mostly;
static unsigned int nat_net_id __read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;

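/* Per-hook private data: an RCU-protected list of the NAT lookup
 * functions (e.g. the iptables and nftables NAT chains) registered
 * for one hook point.  nf_nat_inet_fn() walks this list until one of
 * them sets up a binding for the connection.
 */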
struct nf_nat_lookup_hook_priv {
        struct nf_hook_entries __rcu *entries;

        struct rcu_head rcu_head;
};

struct nf_nat_hooks_net {
        struct nf_hook_ops *nat_hook_ops;
        unsigned int users;
};

struct nat_net {
        struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO];
};

inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
        return rcu_dereference(nf_nat_l3protos[family]);
}

#ifdef CONFIG_XFRM
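/* Rebuild the flow key from the conntrack so that xfrm policy lookups
 * see the addresses as they are after NAT: destination rewrites apply
 * to the original direction, source rewrites to the reply direction.
 */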
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
        const struct nf_nat_l3proto *l3proto;
        const struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        enum ip_conntrack_dir dir;
        unsigned long statusbit;
        u8 family;

        ct = nf_ct_get(skb, &ctinfo);
        if (ct == NULL)
                return;

        family = nf_ct_l3num(ct);
        l3proto = __nf_nat_l3proto_find(family);
        if (l3proto == NULL)
                return;

        dir = CTINFO2DIR(ctinfo);
        if (dir == IP_CT_DIR_ORIGINAL)
                statusbit = IPS_DST_NAT;
        else
                statusbit = IPS_SRC_NAT;

        l3proto->decode_session(skb, ct, dir, statusbit, fl);
}

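/* Re-route a packet whose addresses were just changed by NAT: redo the
 * xfrm lookup on the pre-IPsec route, attach the new dst and make sure
 * there is enough headroom for the (possibly different) output device.
 */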
int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
        struct flowi fl;
        unsigned int hh_len;
        struct dst_entry *dst;
        struct sock *sk = skb->sk;
        int err;

        err = xfrm_decode_session(skb, &fl, family);
        if (err < 0)
                return err;

        dst = skb_dst(skb);
        if (dst->xfrm)
                dst = ((struct xfrm_dst *)dst)->route;
        if (!dst_hold_safe(dst))
                return -EHOSTUNREACH;

        if (sk && !net_eq(net, sock_net(sk)))
                sk = NULL;

        dst = xfrm_lookup(net, dst, &fl, sk, 0);
        if (IS_ERR(dst))
                return PTR_ERR(dst);

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        /* Change in oif may mean change in hh_len. */
        hh_len = skb_dst(skb)->dev->hard_header_len;
        if (skb_headroom(skb) < hh_len &&
            pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

        /* Original src, to ensure we map it consistently if poss. */
        hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
                      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));

        return reciprocal_scale(hash, nf_nat_htable_size);
}

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
                  const struct nf_conn *ignored_conntrack)
{
        /* Conntrack tracking doesn't keep track of outgoing tuples; only
         * incoming ones.  NAT means they don't have a fixed mapping,
         * so we invert the tuple and look for the incoming reply.
         *
         * We could keep a separate hash if this proves too slow.
         */
        struct nf_conntrack_tuple reply;

        nf_ct_invert_tuplepr(&reply, tuple);
        return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

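/* Is the tuple's source address within the range?  IPv4 is compared in
 * host byte order, IPv6 lexicographically.
 */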
static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
                                 const struct nf_nat_range2 *range)
{
        if (t->src.l3num == NFPROTO_IPV4)
                return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
                       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);

        return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
               ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
}

/* Is the manipulable part of the tuple between min and max inclusive? */
static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
                             enum nf_nat_manip_type maniptype,
                             const union nf_conntrack_man_proto *min,
                             const union nf_conntrack_man_proto *max)
{
        __be16 port;

        switch (tuple->dst.protonum) {
        case IPPROTO_ICMP: /* fallthrough */
        case IPPROTO_ICMPV6:
                return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
                       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
        case IPPROTO_GRE: /* all fall through */
        case IPPROTO_TCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
        case IPPROTO_DCCP:
        case IPPROTO_SCTP:
                if (maniptype == NF_NAT_MANIP_SRC)
                        port = tuple->src.u.all;
                else
                        port = tuple->dst.u.all;

                return ntohs(port) >= ntohs(min->all) &&
                       ntohs(port) <= ntohs(max->all);
        default:
                return true;
        }
}

/* If we source-map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range2 *range)
{
        /* If we are supposed to map IPs, then we must be in the
         * range specified, otherwise let this drag us onto a new src IP.
         */
        if (range->flags & NF_NAT_RANGE_MAP_IPS &&
            !nf_nat_inet_in_range(tuple, range))
                return 0;

        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
                return 1;

        return l4proto_in_range(tuple, NF_NAT_MANIP_SRC,
                                &range->min_proto, &range->max_proto);
}

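/* Does the original tuple of @ct use the same protocol, source address
 * and source port/id as @tuple?
 */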
static inline int
same_src(const struct nf_conn *ct,
         const struct nf_conntrack_tuple *tuple)
{
        const struct nf_conntrack_tuple *t;

        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        return (t->dst.protonum == tuple->dst.protonum &&
                nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
                t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
                     const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range2 *range)
{
        unsigned int h = hash_by_src(net, tuple);
        const struct nf_conn *ct;

        hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
                if (same_src(ct, tuple) &&
                    net_eq(net, nf_ct_net(ct)) &&
                    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
                        /* Copy source part from reply tuple. */
                        nf_ct_invert_tuplepr(result,
                                       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
                        result->dst = tuple->dst;

                        if (in_range(result, range))
                                return 1;
                }
        }
        return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
                    struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range2 *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
{
        union nf_inet_addr *var_ipp;
        unsigned int i, max;
        /* Host order */
        u32 minip, maxip, j, dist;
        bool full_range;

        /* No IP mapping?  Do nothing. */
        if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
                return;

        if (maniptype == NF_NAT_MANIP_SRC)
                var_ipp = &tuple->src.u3;
        else
                var_ipp = &tuple->dst.u3;

        /* Fast path: only one choice. */
        if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
                *var_ipp = range->min_addr;
                return;
        }

        if (nf_ct_l3num(ct) == NFPROTO_IPV4)
                max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
        else
                max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

        /* Hashing source and destination IPs gives a fairly even
         * spread in practice (if there are a small number of IPs
         * involved, there usually aren't that many connections
         * anyway).  The consistency means that servers see the same
         * client coming from the same IP (some Internet Banking sites
         * like this), even across reboots.
         */
        j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
                   range->flags & NF_NAT_RANGE_PERSISTENT ?
                        0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

        full_range = false;
        for (i = 0; i <= max; i++) {
                /* If first bytes of the address are at the maximum, use the
                 * distance. Otherwise use the full range.
                 */
                if (!full_range) {
                        minip = ntohl((__force __be32)range->min_addr.all[i]);
                        maxip = ntohl((__force __be32)range->max_addr.all[i]);
                        dist  = maxip - minip + 1;
                } else {
                        minip = 0;
                        dist  = ~0;
                }

                var_ipp->all[i] = (__force __u32)
                        htonl(minip + reciprocal_scale(j, dist));
                if (var_ipp->all[i] != range->max_addr.all[i])
                        full_range = true;

                if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
                        j ^= (__force u32)tuple->dst.u3.all[i];
        }
}

/* Alter the per-proto part of the tuple (depending on maniptype), to
 * give a unique tuple in the given range if possible.
 *
 * Per-protocol part of tuple is initialized to the incoming packet.
 */
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
                                        const struct nf_nat_range2 *range,
                                        enum nf_nat_manip_type maniptype,
                                        const struct nf_conn *ct)
{
        unsigned int range_size, min, max, i, attempts;
        __be16 *keyptr;
        u16 off;
        static const unsigned int max_attempts = 128;

        switch (tuple->dst.protonum) {
        case IPPROTO_ICMP: /* fallthrough */
        case IPPROTO_ICMPV6:
                /* id is same for either direction... */
                keyptr = &tuple->src.u.icmp.id;
                if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
                        min = 0;
                        range_size = 65536;
                } else {
                        min = ntohs(range->min_proto.icmp.id);
                        range_size = ntohs(range->max_proto.icmp.id) -
                                     ntohs(range->min_proto.icmp.id) + 1;
                }
                goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
        case IPPROTO_GRE:
                /* If there is no master conntrack, we are not PPTP;
                 * do not change tuples.
                 */
                if (!ct->master)
                        return;

                if (maniptype == NF_NAT_MANIP_SRC)
                        keyptr = &tuple->src.u.gre.key;
                else
                        keyptr = &tuple->dst.u.gre.key;

                if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
                        min = 1;
                        range_size = 65535;
                } else {
                        min = ntohs(range->min_proto.gre.key);
                        range_size = ntohs(range->max_proto.gre.key) - min + 1;
                }
                goto find_free_id;
#endif
        case IPPROTO_UDP:       /* fallthrough */
        case IPPROTO_UDPLITE:   /* fallthrough */
        case IPPROTO_TCP:       /* fallthrough */
        case IPPROTO_SCTP:      /* fallthrough */
        case IPPROTO_DCCP:      /* fallthrough */
                if (maniptype == NF_NAT_MANIP_SRC)
                        keyptr = &tuple->src.u.all;
                else
                        keyptr = &tuple->dst.u.all;

                break;
        default:
                return;
        }

        /* If no range specified... */
        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
                /* If it's dst rewrite, can't change port */
                if (maniptype == NF_NAT_MANIP_DST)
                        return;

                if (ntohs(*keyptr) < 1024) {
                        /* Loose convention: >> 512 is credential passing */
                        if (ntohs(*keyptr) < 512) {
                                min = 1;
                                range_size = 511 - min + 1;
                        } else {
                                min = 600;
                                range_size = 1023 - min + 1;
                        }
                } else {
                        min = 1024;
                        range_size = 65535 - 1024 + 1;
                }
        } else {
                min = ntohs(range->min_proto.all);
                max = ntohs(range->max_proto.all);
                if (unlikely(max < min))
                        swap(max, min);
                range_size = max - min + 1;
        }

find_free_id:
        if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
                off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
        else
                off = prandom_u32();

        attempts = range_size;
        if (attempts > max_attempts)
                attempts = max_attempts;

        /* We are in softirq; doing a search of the entire range risks
         * soft lockup when all tuples are already used.
         *
         * If we can't find any free port from first offset, pick a new
         * one and try again, with ever smaller search window.
         */
another_round:
        for (i = 0; i < attempts; i++, off++) {
                *keyptr = htons(min + off % range_size);
                if (!nf_nat_used_tuple(tuple, ct))
                        return;
        }

        if (attempts >= range_size || attempts < 16)
                return;
        attempts /= 2;
        off = prandom_u32();
        goto another_round;
}

462 
463 /* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
464  * we change the source to map into the range. For NF_INET_PRE_ROUTING
465  * and NF_INET_LOCAL_OUT, we change the destination to map into the
466  * range. It might not be possible to get a unique tuple, but we try.
467  * At worst (or if we race), we will end up with a final duplicate in
468  * __ip_conntrack_confirm and drop the packet. */
469 static void
470 get_unique_tuple(struct nf_conntrack_tuple *tuple,
471                  const struct nf_conntrack_tuple *orig_tuple,
472                  const struct nf_nat_range2 *range,
473                  struct nf_conn *ct,
474                  enum nf_nat_manip_type maniptype)
475 {
476         const struct nf_conntrack_zone *zone;
477         struct net *net = nf_ct_net(ct);
478 
479         zone = nf_ct_zone(ct);
480 
481         /* 1) If this srcip/proto/src-proto-part is currently mapped,
482          * and that same mapping gives a unique tuple within the given
483          * range, use that.
484          *
485          * This is only required for source (ie. NAT/masq) mappings.
486          * So far, we don't do local source mappings, so multiple
487          * manips not an issue.
488          */
489         if (maniptype == NF_NAT_MANIP_SRC &&
490             !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
491                 /* try the original tuple first */
492                 if (in_range(orig_tuple, range)) {
493                         if (!nf_nat_used_tuple(orig_tuple, ct)) {
494                                 *tuple = *orig_tuple;
495                                 return;
496                         }
497                 } else if (find_appropriate_src(net, zone,
498                                                 orig_tuple, tuple, range)) {
499                         pr_debug("get_unique_tuple: Found current src map\n");
500                         if (!nf_nat_used_tuple(tuple, ct))
501                                 return;
502                 }
503         }
504 
505         /* 2) Select the least-used IP/proto combination in the given range */
506         *tuple = *orig_tuple;
507         find_best_ips_proto(zone, tuple, range, ct, maniptype);
508 
509         /* 3) The per-protocol part of the manip is made to map into
510          * the range to make a unique tuple.
511          */
512 
513         /* Only bother mapping if it's not already in range and unique */
514         if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
515                 if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
516                         if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
517                             l4proto_in_range(tuple, maniptype,
518                                   &range->min_proto,
519                                   &range->max_proto) &&
520                             (range->min_proto.all == range->max_proto.all ||
521                              !nf_nat_used_tuple(tuple, ct)))
522                                 return;
523                 } else if (!nf_nat_used_tuple(tuple, ct)) {
524                         return;
525                 }
526         }
527 
528         /* Last chance: get protocol to try to obtain unique tuple. */
529         nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
530 }
531 
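/* Return the NAT extension of @ct, adding it first if the conntrack is
 * not yet confirmed.  Returns NULL if the extension is absent and can
 * no longer be added.
 */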
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
        struct nf_conn_nat *nat = nfct_nat(ct);

        if (nat)
                return nat;

        if (!nf_ct_is_confirmed(ct))
                nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

        return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
                  const struct nf_nat_range2 *range,
                  enum nf_nat_manip_type maniptype)
{
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_tuple curr_tuple, new_tuple;

        /* Can't set up NAT info for a confirmed ct. */
        if (nf_ct_is_confirmed(ct))
                return NF_ACCEPT;

        WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
                maniptype != NF_NAT_MANIP_DST);

        if (WARN_ON(nf_nat_initialized(ct, maniptype)))
                return NF_DROP;

        /* What we've got will look like inverse of reply. Normally
         * this is what is in the conntrack, except for prior
         * manipulations (future optimization: if num_manips == 0,
         * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
         */
        nf_ct_invert_tuplepr(&curr_tuple,
                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

        if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
                struct nf_conntrack_tuple reply;

                /* Alter conntrack table so it will recognize replies. */
                nf_ct_invert_tuplepr(&reply, &new_tuple);
                nf_conntrack_alter_reply(ct, &reply);

                /* Non-atomic: we own this at the moment. */
                if (maniptype == NF_NAT_MANIP_SRC)
                        ct->status |= IPS_SRC_NAT;
                else
                        ct->status |= IPS_DST_NAT;

                if (nfct_help(ct) && !nfct_seqadj(ct))
                        if (!nfct_seqadj_ext_add(ct))
                                return NF_DROP;
        }

        if (maniptype == NF_NAT_MANIP_SRC) {
                unsigned int srchash;
                spinlock_t *lock;

                srchash = hash_by_src(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
                spin_lock_bh(lock);
                hlist_add_head_rcu(&ct->nat_bysource,
                                   &nf_nat_bysource[srchash]);
                spin_unlock_bh(lock);
        }

        /* It's done. */
        if (maniptype == NF_NAT_MANIP_DST)
                ct->status |= IPS_DST_NAT_DONE;
        else
                ct->status |= IPS_SRC_NAT_DONE;

        return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
        /* Force range to this IP; let proto decide mapping for
         * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
         * Use reply in case it's already been mangled (e.g. local packet).
         */
        union nf_inet_addr ip =
                (manip == NF_NAT_MANIP_SRC ?
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
        struct nf_nat_range2 range = {
                .flags          = NF_NAT_RANGE_MAP_IPS,
                .min_addr       = ip,
                .max_addr       = ip,
        };
        return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
        return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

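/* Mangle the packet so that it matches the inverse of the other
 * direction's tuple, as set up by nf_nat_setup_info().
 */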
static unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
                                     enum nf_nat_manip_type mtype,
                                     enum ip_conntrack_dir dir)
{
        const struct nf_nat_l3proto *l3proto;
        struct nf_conntrack_tuple target;

        /* We are aiming to look like inverse of other direction. */
        nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

        l3proto = __nf_nat_l3proto_find(target.src.l3num);
        if (!l3proto->manip_pkt(skb, 0, &target, mtype))
                return NF_DROP;

        return NF_ACCEPT;
}

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
                           unsigned int hooknum,
                           struct sk_buff *skb)
{
        enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned int verdict = NF_ACCEPT;
        unsigned long statusbit;

        if (mtype == NF_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply dir. */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;

        /* Non-atomic: these bits don't change. */
        if (ct->status & statusbit)
                verdict = nf_nat_manip_pkt(skb, ct, mtype, dir);

        return verdict;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

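/* Common hook body for IPv4 and IPv6 NAT.  For the first packet of a
 * connection, run the registered NAT lookup functions to pick a binding
 * (falling back to a null binding), then mangle the packet according to
 * the conntrack's NAT status bits.
 */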
unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
               const struct nf_hook_state *state)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conn_nat *nat;
        /* maniptype == SRC for postrouting. */
        enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

        ct = nf_ct_get(skb, &ctinfo);
        /* Can't track?  It's not due to stress, or conntrack would
         * have dropped it.  Hence it's the user's responsibility to
         * packet filter it out, or implement conntrack/NAT for that
         * protocol. 8) --RR
         */
        if (!ct)
                return NF_ACCEPT;

        nat = nfct_nat(ct);

        switch (ctinfo) {
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
                /* Only ICMPs can be IP_CT_IS_REPLY.  Fallthrough */
        case IP_CT_NEW:
                /* Seen it before?  This can happen for loopback, retrans,
                 * or local packets.
                 */
                if (!nf_nat_initialized(ct, maniptype)) {
                        struct nf_nat_lookup_hook_priv *lpriv = priv;
                        struct nf_hook_entries *e = rcu_dereference(lpriv->entries);
                        unsigned int ret;
                        int i;

                        if (!e)
                                goto null_bind;

                        for (i = 0; i < e->num_hook_entries; i++) {
                                ret = e->hooks[i].hook(e->hooks[i].priv, skb,
                                                       state);
                                if (ret != NF_ACCEPT)
                                        return ret;
                                if (nf_nat_initialized(ct, maniptype))
                                        goto do_nat;
                        }
null_bind:
                        ret = nf_nat_alloc_null_binding(ct, state->hook);
                        if (ret != NF_ACCEPT)
                                return ret;
                } else {
                        pr_debug("Already setup manip %s for ct %p (status bits 0x%lx)\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct, ct->status);
                        if (nf_nat_oif_changed(state->hook, ctinfo, nat,
                                               state->out))
                                goto oif_changed;
                }
                break;
        default:
                /* ESTABLISHED */
                WARN_ON(ctinfo != IP_CT_ESTABLISHED &&
                        ctinfo != IP_CT_ESTABLISHED_REPLY);
                if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
                        goto oif_changed;
        }
do_nat:
        return nf_nat_packet(ct, ctinfo, state->hook, skb);

oif_changed:
        nf_ct_kill_acct(ct, ctinfo, skb);
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_fn);

struct nf_nat_proto_clean {
        u8      l3proto;
        u8      l4proto;
};

/* Kill conntracks with affected NAT section. */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
        const struct nf_nat_proto_clean *clean = data;

        if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
            (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
                return 0;

        return i->status & IPS_NAT_MASK ? 1 : 0;
}

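/* Unhash the conntrack from the bysource table, under the per-bucket
 * lock.
 */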
static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        unsigned int h;

        h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
        hlist_del_rcu(&ct->nat_bysource);
        spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
        if (nf_nat_proto_remove(ct, data))
                return 1;

        /* This module is being removed and the conntrack has a NAT null
         * binding.  Remove it from the bysource hash, as the table will
         * be freed soon.
         *
         * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
         * will delete the entry from the already-freed table.
         */
        if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
                __nf_nat_cleanup_conntrack(ct);

        /* Don't delete the conntrack.  Although that would make things a
         * lot simpler, we'd end up flushing all conntracks on nat rmmod.
         */
        return 0;
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
        struct nf_nat_proto_clean clean = {
                .l3proto = l3proto,
        };

        nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
        RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
        mutex_unlock(&nf_nat_proto_mutex);
        synchronize_rcu();

        nf_nat_l3proto_clean(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        if (ct->status & IPS_SRC_NAT_DONE)
                __nf_nat_cleanup_conntrack(ct);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
        .len            = sizeof(struct nf_conn_nat),
        .align          = __alignof__(struct nf_conn_nat),
        .destroy        = nf_nat_cleanup_conntrack,
        .id             = NF_CT_EXT_NAT,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
        [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
        [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};

static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
                                          struct nf_nat_range2 *range)
{
        if (tb[CTA_PROTONAT_PORT_MIN]) {
                range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
                range->max_proto.all = range->min_proto.all;
                range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
        if (tb[CTA_PROTONAT_PORT_MAX]) {
                range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
                range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
        return 0;
}

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
                                     const struct nf_conn *ct,
                                     struct nf_nat_range2 *range)
{
        struct nlattr *tb[CTA_PROTONAT_MAX+1];
        int err;

        err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
                               protonat_nla_policy, NULL);
        if (err < 0)
                return err;

        return nf_nat_l4proto_nlattr_to_range(tb, range);
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
        [CTA_NAT_V4_MINIP]      = { .type = NLA_U32 },
        [CTA_NAT_V4_MAXIP]      = { .type = NLA_U32 },
        [CTA_NAT_V6_MINIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_V6_MAXIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_PROTO]         = { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
                    const struct nf_conn *ct, struct nf_nat_range2 *range,
                    const struct nf_nat_l3proto *l3proto)
{
        struct nlattr *tb[CTA_NAT_MAX+1];
        int err;

        memset(range, 0, sizeof(*range));

        err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL);
        if (err < 0)
                return err;

        err = l3proto->nlattr_to_range(tb, range);
        if (err < 0)
                return err;

        if (!tb[CTA_NAT_PROTO])
                return 0;

        return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        struct nf_nat_range2 range;
        const struct nf_nat_l3proto *l3proto;
        int err;

        /* Should not happen, restricted to creating new conntracks
         * via ctnetlink.
         */
        if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
                return -EEXIST;

        /* Make sure the L3 NAT protocol is there by the time we call
         * nf_nat_setup_info() to attach the null binding; otherwise
         * this may oops.
         */
        l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
        if (l3proto == NULL)
                return -EAGAIN;

        /* No NAT information has been passed, allocate the null-binding */
        if (attr == NULL)
                return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

        err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
        if (err < 0)
                return err;

        return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {
        .name           = "nat-follow-master",
        .expectfn       = nf_nat_follow_master,
};

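/* Register a NAT lookup function for one hook point.  The first caller
 * per protocol family registers the base netfilter hooks; subsequent
 * callers attach their ops to the existing hook's RCU list.
 */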
int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
                       const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count)
{
        struct nat_net *nat_net = net_generic(net, nat_net_id);
        struct nf_nat_hooks_net *nat_proto_net;
        struct nf_nat_lookup_hook_priv *priv;
        unsigned int hooknum = ops->hooknum;
        struct nf_hook_ops *nat_ops;
        int i, ret;

        if (WARN_ON_ONCE(ops->pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
                return -EINVAL;

        nat_proto_net = &nat_net->nat_proto_net[ops->pf];

        for (i = 0; i < ops_count; i++) {
                if (WARN_ON(orig_nat_ops[i].pf != ops->pf))
                        return -EINVAL;
                if (orig_nat_ops[i].hooknum == hooknum) {
                        hooknum = i;
                        break;
                }
        }

        if (WARN_ON_ONCE(i == ops_count))
                return -EINVAL;

        mutex_lock(&nf_nat_proto_mutex);
        if (!nat_proto_net->nat_hook_ops) {
                WARN_ON(nat_proto_net->users != 0);

                nat_ops = kmemdup(orig_nat_ops, sizeof(*orig_nat_ops) * ops_count, GFP_KERNEL);
                if (!nat_ops) {
                        mutex_unlock(&nf_nat_proto_mutex);
                        return -ENOMEM;
                }

                for (i = 0; i < ops_count; i++) {
                        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
                        if (priv) {
                                nat_ops[i].priv = priv;
                                continue;
                        }
                        mutex_unlock(&nf_nat_proto_mutex);
                        while (i)
                                kfree(nat_ops[--i].priv);
                        kfree(nat_ops);
                        return -ENOMEM;
                }

                ret = nf_register_net_hooks(net, nat_ops, ops_count);
                if (ret < 0) {
                        mutex_unlock(&nf_nat_proto_mutex);
                        for (i = 0; i < ops_count; i++)
                                kfree(nat_ops[i].priv);
                        kfree(nat_ops);
                        return ret;
                }

                nat_proto_net->nat_hook_ops = nat_ops;
        }

        nat_ops = nat_proto_net->nat_hook_ops;
        priv = nat_ops[hooknum].priv;
        if (WARN_ON_ONCE(!priv)) {
                mutex_unlock(&nf_nat_proto_mutex);
                return -EOPNOTSUPP;
        }

        ret = nf_hook_entries_insert_raw(&priv->entries, ops);
        if (ret == 0)
                nat_proto_net->users++;

        mutex_unlock(&nf_nat_proto_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_register_fn);

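/* Reverse of nf_nat_register_fn(): detach the lookup function; the last
 * caller for a protocol family also unregisters the base hooks and
 * frees the per-hook private data.
 */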
void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
                          unsigned int ops_count)
{
        struct nat_net *nat_net = net_generic(net, nat_net_id);
        struct nf_nat_hooks_net *nat_proto_net;
        struct nf_nat_lookup_hook_priv *priv;
        struct nf_hook_ops *nat_ops;
        int hooknum = ops->hooknum;
        int i;

        if (ops->pf >= ARRAY_SIZE(nat_net->nat_proto_net))
                return;

        nat_proto_net = &nat_net->nat_proto_net[ops->pf];

        mutex_lock(&nf_nat_proto_mutex);
        if (WARN_ON(nat_proto_net->users == 0))
                goto unlock;

        nat_proto_net->users--;

        nat_ops = nat_proto_net->nat_hook_ops;
        for (i = 0; i < ops_count; i++) {
                if (nat_ops[i].hooknum == hooknum) {
                        hooknum = i;
                        break;
                }
        }
        if (WARN_ON_ONCE(i == ops_count))
                goto unlock;
        priv = nat_ops[hooknum].priv;
        nf_hook_entries_delete_raw(&priv->entries, ops);

        if (nat_proto_net->users == 0) {
                nf_unregister_net_hooks(net, nat_ops, ops_count);

                for (i = 0; i < ops_count; i++) {
                        priv = nat_ops[i].priv;
                        kfree_rcu(priv, rcu_head);
                }

                nat_proto_net->nat_hook_ops = NULL;
                kfree(nat_ops);
        }
unlock:
        mutex_unlock(&nf_nat_proto_mutex);
}
EXPORT_SYMBOL_GPL(nf_nat_unregister_fn);

static struct pernet_operations nat_net_ops = {
        .id = &nat_net_id,
        .size = sizeof(struct nat_net),
};

static struct nf_nat_hook nat_hook = {
        .parse_nat_setup        = nfnetlink_parse_nat_setup,
#ifdef CONFIG_XFRM
        .decode_session         = __nf_nat_decode_session,
#endif
        .manip_pkt              = nf_nat_manip_pkt,
};

static int __init nf_nat_init(void)
{
        int ret, i;

        /* Leave them the same for the moment. */
        nf_nat_htable_size = nf_conntrack_htable_size;
        if (nf_nat_htable_size < CONNTRACK_LOCKS)
                nf_nat_htable_size = CONNTRACK_LOCKS;

        nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
        if (!nf_nat_bysource)
                return -ENOMEM;

        ret = nf_ct_extend_register(&nat_extend);
        if (ret < 0) {
                kvfree(nf_nat_bysource);
                pr_err("Unable to register extension\n");
                return ret;
        }

        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_lock_init(&nf_nat_locks[i]);

        ret = register_pernet_subsys(&nat_net_ops);
        if (ret < 0) {
                nf_ct_extend_unregister(&nat_extend);
                return ret;
        }

        nf_ct_helper_expectfn_register(&follow_master_nat);

        WARN_ON(nf_nat_hook != NULL);
        RCU_INIT_POINTER(nf_nat_hook, &nat_hook);

        return 0;
}

static void __exit nf_nat_cleanup(void)
{
        struct nf_nat_proto_clean clean = {};

        nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

        nf_ct_extend_unregister(&nat_extend);
        nf_ct_helper_expectfn_unregister(&follow_master_nat);
        RCU_INIT_POINTER(nf_nat_hook, NULL);

        synchronize_net();
        kvfree(nf_nat_bysource);
        unregister_pernet_subsys(&nat_net_ops);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);
