Linux/kernel/rcu/rcu.h

/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Process-level increment to ->dynticks_nesting field.  This allows for
 * architectures that use half-interrupts and half-exceptions from
 * process context.
 *
 * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
 * that counts the number of process-based reasons why RCU cannot
 * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
 * is the value used to increment or decrement this field.
 *
 * The rest of the bits could in principle be used to count interrupts,
 * but this would mean that a negative-one value in the interrupt
 * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
 * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
 * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
 * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
 * initial exit from idle.
 */
#define DYNTICK_TASK_NEST_WIDTH 7
#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
#define DYNTICK_TASK_FLAG          ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
#define DYNTICK_TASK_MASK          ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
#define DYNTICK_TASK_EXIT_IDLE     (DYNTICK_TASK_NEST_VALUE + \
                                    DYNTICK_TASK_FLAG)
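
/*
 * Editor's illustration (not part of the kernel source): with a 64-bit
 * long long, the macros above work out to the following constants.
 */
#if 0 /* illustrative values only, never compiled */
DYNTICK_TASK_NEST_VALUE == (1LL << 56);               /* nest increment */
DYNTICK_TASK_NEST_MASK  == (0x7fLL << 56);            /* nest field, bits 56..62 */
DYNTICK_TASK_FLAG       == (1LL << 54);               /* guard flag */
DYNTICK_TASK_MASK       == (3LL << 53);               /* guard field, bits 53..54 */
DYNTICK_TASK_EXIT_IDLE  == (1LL << 56) | (1LL << 54); /* initial exit from idle */
#endif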


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT       2
#define RCU_SEQ_STATE_MASK      ((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
        WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
        WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
        WRITE_ONCE(*sp, *sp + 1);
        smp_mb(); /* Ensure update-side operation after counter increment. */
        WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
        smp_mb(); /* Ensure update-side operation before counter increment. */
        WARN_ON_ONCE(!rcu_seq_state(*sp));
        WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
}

/* Take a snapshot of the update side's sequence number. */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
        unsigned long s;

        s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
        smp_mb(); /* Above access must not bleed into critical section. */
        return s;
}
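
/*
 * Editor's worked example (not part of the kernel source): with
 * RCU_SEQ_CTR_SHIFT == 2, the low two bits hold the state.  If *sp == 8
 * (counter 2, idle), the snapshot is (8 + 7) & ~3 == 12: done after one
 * full update.  If *sp == 9 (counter 2, update in flight), the snapshot
 * is (9 + 7) & ~3 == 16: the in-flight update ends at 12, and one more
 * full update is needed before a complete operation is guaranteed.
 */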

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
        return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
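
/*
 * Editor's usage sketch (hypothetical names, not kernel code): the update
 * side brackets an operation with rcu_seq_start()/rcu_seq_end(), while a
 * waiter snapshots the counter and polls for completion.
 */
#if 0 /* illustrative only, never compiled */
static unsigned long example_gp_seq;            /* hypothetical counter */

static void example_do_gp(void)
{
        rcu_seq_start(&example_gp_seq);         /* state becomes nonzero */
        /* ... drive the grace period to completion ... */
        rcu_seq_end(&example_gp_seq);           /* counter advances, state clears */
}

static void example_wait_for_gp(void)
{
        unsigned long s = rcu_seq_snap(&example_gp_seq);

        while (!rcu_seq_done(&example_gp_seq, s))
                schedule_timeout_uninterruptible(1);
}
#endif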

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part of the
 * RCU API.  They remain here because they are used by all RCU flavors.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY   0
# define STATE_RCU_HEAD_QUEUED  1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        int r1;

        r1 = debug_object_activate(head, &rcuhead_debug_descr);
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_READY,
                                  STATE_RCU_HEAD_QUEUED);
        return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_QUEUED,
                                  STATE_RCU_HEAD_READY);
        debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else   /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif  /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
        unsigned long offset = (unsigned long)head->func;

        rcu_lock_acquire(&rcu_callback_map);
        if (__is_kfree_rcu_offset(offset)) {
                RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
                kfree((void *)head - offset);
                rcu_lock_release(&rcu_callback_map);
                return true;
        } else {
                RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
                head->func(head);
                rcu_lock_release(&rcu_callback_map);
                return false;
        }
}
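
/*
 * Editor's sketch (hypothetical struct, not kernel code): kfree_rcu()
 * encodes the offset of the rcu_head within its enclosing structure in
 * place of a function pointer, which is what __is_kfree_rcu_offset()
 * detects and the "(void *)head - offset" above undoes.
 */
#if 0 /* illustrative only, never compiled */
struct example {
        int data;
        struct rcu_head rh;
};

static void example_release(struct example *p)
{
        /* Queues &p->rh with offsetof(struct example, rh) as ->func;
         * after a grace period, __rcu_reclaim() recovers p and kfree()s it. */
        kfree_rcu(p, rh);
}
#endif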

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

#define rcu_ftrace_dump_stall_suppress() \
do { \
        if (!rcu_cpu_stall_suppress) \
                rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
        if (rcu_cpu_stall_suppress == 3) \
                rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
        static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
        \
        if (!atomic_read(&___rfd_beenhere) && \
            !atomic_xchg(&___rfd_beenhere, 1)) { \
                tracing_off(); \
                rcu_ftrace_dump_stall_suppress(); \
                ftrace_dump(oops_dump_mode); \
                rcu_ftrace_dump_stall_unsuppress(); \
        } \
} while (0)
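
/*
 * Editor's note: the atomic_read()/atomic_xchg() pair above is the usual
 * "run at most once" idiom.  The plain read cheaply skips the atomic
 * exchange on every call after the first, while the xchg arbitrates any
 * race between CPUs reaching the same callsite simultaneously.
 */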

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
extern bool rcu_fanout_exact;
extern int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
        int i;

        if (rcu_fanout_exact) {
                levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
                for (i = rcu_num_lvls - 2; i >= 0; i--)
                        levelspread[i] = RCU_FANOUT;
        } else {
                int ccur;
                int cprv;

                cprv = nr_cpu_ids;
                for (i = rcu_num_lvls - 1; i >= 0; i--) {
                        ccur = levelcnt[i];
                        levelspread[i] = (cprv + ccur - 1) / ccur;
                        cprv = ccur;
                }
        }
}
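
/*
 * Editor's worked example (not part of the kernel source): with
 * nr_cpu_ids == 96 and a two-level tree of levelcnt[] == {1, 6}, the
 * balancing branch computes levelspread[1] == (96 + 6 - 1) / 6 == 16
 * CPUs per leaf, then levelspread[0] == (6 + 1 - 1) / 1 == 6 leaves
 * under the root.
 */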

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
             (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
        for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
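
/*
 * Editor's usage sketch (not kernel code): a typical leaf scan reads the
 * CPU span recorded in each leaf rcu_node structure.
 */
#if 0 /* illustrative only, never compiled */
static void example_scan_leaves(struct rcu_state *rsp)
{
        struct rcu_node *rnp;

        rcu_for_each_leaf_node(rsp, rnp)
                pr_info("leaf covers CPUs %d-%d\n", rnp->grplo, rnp->grphi);
}
#endif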

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
        for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
             cpu <= rnp->grphi; \
             cpu = cpumask_next((cpu), cpu_possible_mask))
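
/*
 * Editor's note: cpumask_next(n, mask) returns the first set CPU strictly
 * greater than n, so starting from rnp->grplo - 1 makes the iteration
 * include rnp->grplo itself when that CPU is possible.
 */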

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p)                                       \
do {                                                                    \
        raw_spin_lock(&ACCESS_PRIVATE(p, lock));                        \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p)                                   \
do {                                                                    \
        raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));                    \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)                                 \
        raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags)                        \
do {                                                                    \
        raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);         \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)                   \
        raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p)                                    \
({                                                                      \
        bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));    \
                                                                        \
        if (___locked)                                                  \
                smp_mb__after_unlock_lock();                            \
        ___locked;                                                      \
})
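
/*
 * Editor's usage sketch (not kernel code): callers take and release
 * rcu_node locks only through the wrappers, picking up the extra
 * smp_mb__after_unlock_lock() ordering automatically.
 */
#if 0 /* illustrative only, never compiled */
static void example_update_node(struct rcu_node *rnp)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        /* ... update fields protected by rnp->lock ... */
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
#endif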

#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE  0
#define RCU_SCHEDULER_INIT      1
#define RCU_SCHEDULER_RUNNING   2

#ifdef CONFIG_TINY_RCU
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

enum rcutorture_type {
        RCU_FLAVOR,
        RCU_BH_FLAVOR,
        RCU_SCHED_FLAVOR,
        RCU_TASKS_FLAVOR,
        SRCU_FLAVOR,
        INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                            unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
                                          int *flags,
                                          unsigned long *gpnum,
                                          unsigned long *completed)
{
        *flags = 0;
        *gpnum = 0;
        *completed = 0;
}
static inline void rcutorture_record_test_transition(void) { }
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
                                           struct srcu_struct *sp, int *flags,
                                           unsigned long *gpnum,
                                           unsigned long *completed)
{
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
        *completed = sp->srcu_idx;
        *gpnum = *completed;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
                             unsigned long *gpnum, unsigned long *completed);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_batches_started(void) { return 0; }
static inline unsigned long rcu_batches_started_bh(void) { return 0; }
static inline unsigned long rcu_batches_started_sched(void) { return 0; }
static inline unsigned long rcu_batches_completed(void) { return 0; }
static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void rcu_bh_force_quiescent_state(void) { }
static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
#else /* #ifdef CONFIG_TINY_RCU */
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
unsigned long rcu_batches_started(void);
unsigned long rcu_batches_started_bh(void);
unsigned long rcu_batches_started_sched(void);
unsigned long rcu_batches_completed(void);
unsigned long rcu_batches_completed_bh(void);
unsigned long rcu_batches_completed_sched(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif

#endif /* __LINUX_RCU_H */