TOMOYO Linux Cross Reference
Linux/mm/slab_common.c

/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                                   size_t size)
{
        struct kmem_cache *s = NULL;

        if (!name || in_interrupt() || size < sizeof(void *) ||
                size > KMALLOC_MAX_SIZE) {
                pr_err("kmem_cache_create(%s) integrity check failed\n", name);
                return -EINVAL;
        }

        list_for_each_entry(s, &slab_caches, list) {
                char tmp;
                int res;

                /*
                 * This happens when the module gets unloaded and doesn't
                 * destroy its slab cache and no-one else reuses the vmalloc
                 * area of the module.  Print a warning.
                 */
                res = probe_kernel_address(s->name, tmp);
                if (res) {
                        pr_err("Slab cache with size %d has lost its name\n",
                               s->object_size);
                        continue;
                }

                /*
                 * For simplicity, we won't check this in the list of memcg
                 * caches. We have control over memcg naming, and if there
                 * aren't duplicates in the global list, there won't be any
                 * duplicates in the memcg lists as well.
                 */
                if (!memcg && !strcmp(s->name, name)) {
                        pr_err("%s (%s): Cache name already exists.\n",
                               __func__, name);
                        dump_stack();
                        s = NULL;
                        return -EINVAL;
                }
        }

        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
        return 0;
}
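
/*
 * Example (illustrative, not part of the original file): with
 * CONFIG_DEBUG_VM enabled, the checks above reject creation from
 * interrupt context, sizes below sizeof(void *) or above
 * KMALLOC_MAX_SIZE, and a duplicate global cache name:
 *
 *      kmem_cache_create("foo", 64, 0, 0, NULL);       (succeeds)
 *      kmem_cache_create("foo", 64, 0, 0, NULL);       (fails the strcmp()
 *                                                       check, returns NULL)
 */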
#else
static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
                                          const char *name, size_t size)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG_KMEM
int memcg_update_all_caches(int num_memcgs)
{
        struct kmem_cache *s;
        int ret = 0;
        mutex_lock(&slab_mutex);

        list_for_each_entry(s, &slab_caches, list) {
                if (!is_root_cache(s))
                        continue;

                ret = memcg_update_cache_size(s, num_memcgs);
                /*
                 * See comment in memcontrol.c, memcg_update_cache_size:
                 * Instead of freeing the memory, we'll just leave the caches
                 * up to this point in an updated state.
                 */
                if (ret)
                        goto out;
        }

        memcg_update_array_size(num_memcgs);
out:
        mutex_unlock(&slab_mutex);
        return ret;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size)
{
        /*
         * If the user wants hardware cache aligned objects then follow that
         * suggestion if the object is sufficiently large.
         *
         * The hardware cache alignment cannot override the specified
         * alignment though. If that is greater, then use it.
         */
        if (flags & SLAB_HWCACHE_ALIGN) {
                unsigned long ralign = cache_line_size();
                while (size <= ralign / 2)
                        ralign /= 2;
                align = max(align, ralign);
        }

        if (align < ARCH_SLAB_MINALIGN)
                align = ARCH_SLAB_MINALIGN;

        return ALIGN(align, sizeof(void *));
}
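
/*
 * Worked example (illustrative, not part of the original file): with
 * SLAB_HWCACHE_ALIGN, a 20-byte object and a 64-byte cache line, the
 * loop above halves ralign while an object still fits in half of it:
 * 64 -> 32 (since 20 <= 32), then stops (20 > 16). Objects are thus
 * placed two per cache line at 32-byte alignment instead of wasting
 * 44 bytes per object on full 64-byte alignment.
 */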


/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */

struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
                        size_t align, unsigned long flags, void (*ctor)(void *),
                        struct kmem_cache *parent_cache)
{
        struct kmem_cache *s = NULL;
        int err = 0;

        get_online_cpus();
        mutex_lock(&slab_mutex);

        if (kmem_cache_sanity_check(memcg, name, size))
                goto out_locked;

        /*
         * Some allocators will constrain the set of valid flags to a subset
         * of all flags. We expect them to define CACHE_CREATE_MASK in this
         * case, and we'll just provide them with a sanitized version of the
         * passed flags.
         */
        flags &= CACHE_CREATE_MASK;

        s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
        if (s)
                goto out_locked;

        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
        if (s) {
                s->object_size = s->size = size;
                s->align = calculate_alignment(flags, align, size);
                s->ctor = ctor;

                if (memcg_register_cache(memcg, s, parent_cache)) {
                        kmem_cache_free(kmem_cache, s);
                        err = -ENOMEM;
                        goto out_locked;
                }

                s->name = kstrdup(name, GFP_KERNEL);
                if (!s->name) {
                        kmem_cache_free(kmem_cache, s);
                        err = -ENOMEM;
                        goto out_locked;
                }

                err = __kmem_cache_create(s, flags);
                if (!err) {
                        s->refcount = 1;
                        list_add(&s->list, &slab_caches);
                        memcg_cache_list_add(memcg, s);
                } else {
                        kfree(s->name);
                        kmem_cache_free(kmem_cache, s);
                }
        } else
                err = -ENOMEM;

out_locked:
        mutex_unlock(&slab_mutex);
        put_online_cpus();

        if (err) {

                if (flags & SLAB_PANIC)
                        panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
                                name, err);
                else {
                        printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
                                name, err);
                        dump_stack();
                }

                return NULL;
        }

        return s;
}

struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
                  unsigned long flags, void (*ctor)(void *))
{
        return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
}
EXPORT_SYMBOL(kmem_cache_create);
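
/*
 * Usage sketch (illustrative, not part of the original file; "foo" and
 * struct foo are made-up names). A typical caller creates its cache
 * once, allocates and frees objects from it, and destroys it on
 * teardown:
 *
 *      struct kmem_cache *foo_cache;
 *      struct foo *f;
 *
 *      foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                    SLAB_HWCACHE_ALIGN, NULL);
 *      if (!foo_cache)
 *              return -ENOMEM;
 *      f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *      ...
 *      kmem_cache_free(foo_cache, f);
 *      kmem_cache_destroy(foo_cache);
 */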

void kmem_cache_destroy(struct kmem_cache *s)
{
        /* Destroy all the children caches if we aren't a memcg cache */
        kmem_cache_destroy_memcg_children(s);

        get_online_cpus();
        mutex_lock(&slab_mutex);
        s->refcount--;
        if (!s->refcount) {
                list_del(&s->list);

                if (!__kmem_cache_shutdown(s)) {
                        mutex_unlock(&slab_mutex);
                        if (s->flags & SLAB_DESTROY_BY_RCU)
                                rcu_barrier();

                        memcg_release_cache(s);
                        kfree(s->name);
                        kmem_cache_free(kmem_cache, s);
                } else {
                        list_add(&s->list, &slab_caches);
                        mutex_unlock(&slab_mutex);
                        printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
                                s->name);
                        dump_stack();
                }
        } else {
                mutex_unlock(&slab_mutex);
        }
        put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

int slab_is_available(void)
{
        return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
                unsigned long flags)
{
        int err;

        s->name = name;
        s->size = s->object_size = size;
        s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
        err = __kmem_cache_create(s, flags);

        if (err)
                panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
                                        name, size, err);

        s->refcount = -1;       /* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
                                unsigned long flags)
{
        struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

        if (!s)
                panic("Out of memory when creating slab %s\n", name);

        create_boot_cache(s, name, size, flags);
        list_add(&s->list, &slab_caches);
        s->refcount = 1;
        return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
        3,      /* 8 */
        4,      /* 16 */
        5,      /* 24 */
        5,      /* 32 */
        6,      /* 40 */
        6,      /* 48 */
        6,      /* 56 */
        6,      /* 64 */
        1,      /* 72 */
        1,      /* 80 */
        1,      /* 88 */
        1,      /* 96 */
        7,      /* 104 */
        7,      /* 112 */
        7,      /* 120 */
        7,      /* 128 */
        2,      /* 136 */
        2,      /* 144 */
        2,      /* 152 */
        2,      /* 160 */
        2,      /* 168 */
        2,      /* 176 */
        2,      /* 184 */
        2       /* 192 */
};

static inline int size_index_elem(size_t bytes)
{
        return (bytes - 1) / 8;
}
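
/*
 * Worked example (illustrative, not part of the original file): for
 * kmalloc(100), size_index_elem(100) = (100 - 1) / 8 = 12 and
 * size_index[12] = 7, so the request is served from the 128-byte
 * cache (kmalloc_caches[7], 2^7 bytes).
 */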

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
        int index;

        if (size > KMALLOC_MAX_SIZE) {
                WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
        }

        if (size <= 192) {
                if (!size)
                        return ZERO_SIZE_PTR;

                index = size_index[size_index_elem(size)];
        } else
                index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
        if (unlikely((flags & GFP_DMA)))
                return kmalloc_dma_caches[index];

#endif
        return kmalloc_caches[index];
}
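
/*
 * Worked example (illustrative, not part of the original file): for
 * sizes above 192 the index is the position of the highest set bit of
 * (size - 1), so a 300-byte request yields fls(299) = 9 and is served
 * from the 512-byte cache (2^9), the smallest power-of-two size that
 * fits it.
 */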

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
        int i;

        /*
         * Patch up the size_index table if we have strange large alignment
         * requirements for the kmalloc array. This is only the case for
         * MIPS it seems. The standard arches will not generate any code here.
         *
         * Largest permitted alignment is 256 bytes due to the way we
         * handle the index determination for the smaller caches.
         *
         * Make sure that nothing crazy happens if someone starts tinkering
         * around with ARCH_KMALLOC_MINALIGN.
         */
        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
                (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

        for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
                int elem = size_index_elem(i);

                if (elem >= ARRAY_SIZE(size_index))
                        break;
                size_index[elem] = KMALLOC_SHIFT_LOW;
        }
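
        /*
         * Worked example (illustrative, not part of the original file):
         * assuming KMALLOC_MIN_SIZE = 64 and hence KMALLOC_SHIFT_LOW =
         * ilog2(64) = 6, the loop above rewrites the table entries for
         * sizes 8..56 to 6, so those requests fall through to the
         * 64-byte cache instead of smaller caches that do not exist in
         * such a configuration.
         */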

        if (KMALLOC_MIN_SIZE >= 64) {
                /*
                 * The 96 byte size cache is not used if the alignment
                 * is 64 byte.
                 */
                for (i = 64 + 8; i <= 96; i += 8)
                        size_index[size_index_elem(i)] = 7;

        }

        if (KMALLOC_MIN_SIZE >= 128) {
                /*
                 * The 192 byte sized cache is not used if the alignment
                 * is 128 byte. Redirect kmalloc to use the 256 byte cache
                 * instead.
                 */
                for (i = 128 + 8; i <= 192; i += 8)
                        size_index[size_index_elem(i)] = 8;
        }
        for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
                if (!kmalloc_caches[i]) {
                        kmalloc_caches[i] = create_kmalloc_cache(NULL,
                                                        1 << i, flags);
                }

                /*
                 * Caches that are not of a power-of-two size.
                 * These have to be created immediately after the
                 * earlier power of two caches.
                 */
                if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
                        kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);

                if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
                        kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
        }

        /* Kmalloc array is now usable */
        slab_state = UP;

        for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
                struct kmem_cache *s = kmalloc_caches[i];
                char *n;

                if (s) {
                        n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));

                        BUG_ON(!n);
                        s->name = n;
                }
        }

#ifdef CONFIG_ZONE_DMA
        for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
                struct kmem_cache *s = kmalloc_caches[i];

                if (s) {
                        int size = kmalloc_size(i);
                        char *n = kasprintf(GFP_NOWAIT,
                                 "dma-kmalloc-%d", size);

                        BUG_ON(!n);
                        kmalloc_dma_caches[i] = create_kmalloc_cache(n,
                                size, SLAB_CACHE_DMA | flags);
                }
        }
#endif
}
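
/*
 * Resulting layout (illustrative, not part of the original file), for
 * a configuration with KMALLOC_MIN_SIZE = 8 and KMALLOC_SHIFT_LOW = 3:
 *
 *      index:   1    2   3   4   5   6    7    8  ...
 *      cache:  96  192   8  16  32  64  128  256  ...
 *
 * i.e. kmalloc_caches[i] normally holds the 2^i byte cache, with the
 * two non-power-of-two sizes (96 and 192) tucked into slots 1 and 2.
 */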
#endif /* !CONFIG_SLOB */


#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

void print_slabinfo_header(struct seq_file *m)
{
        /*
         * Output format version, so at least we can change it
         * without _too_ many complaints.
         */
#ifdef CONFIG_DEBUG_SLAB
        seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
        seq_puts(m, "slabinfo - version: 2.1\n");
#endif
        seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
                 "<objperslab> <pagesperslab>");
        seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
        seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
                 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
        seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
        seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        loff_t n = *pos;

        mutex_lock(&slab_mutex);
        if (!n)
                print_slabinfo_header(m);

        return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
        return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
        struct kmem_cache *c;
        struct slabinfo sinfo;
        int i;

        if (!is_root_cache(s))
                return;

        for_each_memcg_cache_index(i) {
                c = cache_from_memcg(s, i);
                if (!c)
                        continue;

                memset(&sinfo, 0, sizeof(sinfo));
                get_slabinfo(c, &sinfo);

                info->active_slabs += sinfo.active_slabs;
                info->num_slabs += sinfo.num_slabs;
                info->shared_avail += sinfo.shared_avail;
                info->active_objs += sinfo.active_objs;
                info->num_objs += sinfo.num_objs;
        }
}

int cache_show(struct kmem_cache *s, struct seq_file *m)
{
        struct slabinfo sinfo;

        memset(&sinfo, 0, sizeof(sinfo));
        get_slabinfo(s, &sinfo);

        memcg_accumulate_slabinfo(s, &sinfo);

        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
                   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
                   sinfo.objects_per_slab, (1 << sinfo.cache_order));

        seq_printf(m, " : tunables %4u %4u %4u",
                   sinfo.limit, sinfo.batchcount, sinfo.shared);
        seq_printf(m, " : slabdata %6lu %6lu %6lu",
                   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
        slabinfo_show_stats(m, s);
        seq_putc(m, '\n');
        return 0;
}
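
/*
 * Sample /proc/slabinfo line as produced by the format strings above
 * (values made up for illustration: 128-byte objects, 32 per one-page
 * slab, 36 slabs):
 *
 * kmalloc-128         1024   1152    128   32    1 : tunables  120   60    8 : slabdata     36     36      0
 */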

static int s_show(struct seq_file *m, void *p)
{
        struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

        if (!is_root_cache(s))
                return 0;
        return cache_show(s, m);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
        .start = s_start,
        .next = slab_next,
        .stop = slab_stop,
        .show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
        .open           = slabinfo_open,
        .read           = seq_read,
        .write          = slabinfo_write,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int __init slab_proc_init(void)
{
        proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
                                                &proc_slabinfo_operations);
        return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */
