TOMOYO Linux Cross Reference
Linux/fs/pipe.c

/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/aio.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/* Maximum allocatable pages per user. The hard limit is unset by default;
 * the soft limit matches the defaults: PIPE_DEF_BUFFERS pages per pipe
 * times the default open-file limit.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
        if (pipe->files)
                mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
        /*
         * pipe_lock() nests non-pipe inode locks (for writing to a file)
         */
        pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
        if (pipe->files)
                mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
        mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
        mutex_unlock(&pipe->mutex);
}

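/*
 * Lock two pipes in a fixed (address) order, so that two tasks locking
 * the same pair concurrently can never deadlock by taking the mutexes
 * in opposite order.
 */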
void pipe_double_lock(struct pipe_inode_info *pipe1,
                      struct pipe_inode_info *pipe2)
{
        BUG_ON(pipe1 == pipe2);

        if (pipe1 < pipe2) {
                pipe_lock_nested(pipe1, I_MUTEX_PARENT);
                pipe_lock_nested(pipe2, I_MUTEX_CHILD);
        } else {
                pipe_lock_nested(pipe2, I_MUTEX_PARENT);
                pipe_lock_nested(pipe1, I_MUTEX_CHILD);
        }
}

/*
 * Drop the inode semaphore and wait for a pipe event, atomically.
 * prepare_to_wait() queues us on the wait queue before the lock is
 * dropped, so a wakeup that arrives between pipe_unlock() and
 * schedule() is not lost.
 */
void pipe_wait(struct pipe_inode_info *pipe)
{
        DEFINE_WAIT(wait);

        /*
         * Pipes are system-local resources, so sleeping on them
         * is considered a noninteractive wait:
         */
        prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
        pipe_unlock(pipe);
        schedule();
        finish_wait(&pipe->wait, &wait);
        pipe_lock(pipe);
}

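/*
 * Copy helpers for the read/write fast paths. With @atomic set they use
 * the no-fault copy variants, which may only be used while holding an
 * atomic kmap; the callers pre-fault the user pages and fall back to the
 * sleeping (atomic == 0) path if the copy still faults.
 */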
static int
pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
                        size_t *remaining, int atomic)
{
        unsigned long copy;

        while (*remaining > 0) {
                while (!iov->iov_len)
                        iov++;
                copy = min_t(unsigned long, *remaining, iov->iov_len);

                if (atomic) {
                        if (__copy_from_user_inatomic(addr + *offset,
                                                      iov->iov_base, copy))
                                return -EFAULT;
                } else {
                        if (copy_from_user(addr + *offset,
                                           iov->iov_base, copy))
                                return -EFAULT;
                }
                *offset += copy;
                *remaining -= copy;
                iov->iov_base += copy;
                iov->iov_len -= copy;
        }
        return 0;
}

static int
pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
                      size_t *remaining, int atomic)
{
        unsigned long copy;

        while (*remaining > 0) {
                while (!iov->iov_len)
                        iov++;
                copy = min_t(unsigned long, *remaining, iov->iov_len);

                if (atomic) {
                        if (__copy_to_user_inatomic(iov->iov_base,
                                                    addr + *offset, copy))
                                return -EFAULT;
                } else {
                        if (copy_to_user(iov->iov_base,
                                         addr + *offset, copy))
                                return -EFAULT;
                }
                *offset += copy;
                *remaining -= copy;
                iov->iov_base += copy;
                iov->iov_len -= copy;
        }
        return 0;
}

/*
 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 * Returns the number of bytes not faulted in.
 */
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
{
        while (!iov->iov_len)
                iov++;

        while (len > 0) {
                unsigned long this_len;

                this_len = min_t(unsigned long, len, iov->iov_len);
                if (fault_in_pages_writeable(iov->iov_base, this_len))
                        break;

                len -= this_len;
                iov++;
        }

        return len;
}

/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
        while (!iov->iov_len)
                iov++;

        while (len > 0) {
                unsigned long this_len;

                this_len = min_t(unsigned long, len, iov->iov_len);
                fault_in_pages_readable(iov->iov_base, this_len);
                len -= this_len;
                iov++;
        }
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        /*
         * If nobody else uses this page, and we don't already have a
         * temporary page, let's keep track of it as a one-deep
         * allocation cache. (Otherwise just release our reference to it)
         */
        if (page_count(page) == 1 && !pipe->tmp_page)
                pipe->tmp_page = page;
        else
                page_cache_release(page);
}

/**
 * generic_pipe_buf_map - virtually map a pipe buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer that should be mapped
 * @atomic:     whether to use an atomic map
 *
 * Description:
 *      This function returns a kernel virtual address mapping for the
 *      pipe_buffer passed in @buf. If @atomic is set, an atomic map is
 *      provided and the caller has to be careful not to fault before
 *      calling the unmap function.
 *
 *      Note that this function calls kmap_atomic() if @atomic != 0.
 */
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
                           struct pipe_buffer *buf, int atomic)
{
        if (atomic) {
                buf->flags |= PIPE_BUF_FLAG_ATOMIC;
                return kmap_atomic(buf->page);
        }

        return kmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_map);

/**
 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer that should be unmapped
 * @map_data:   the data that the mapping function returned
 *
 * Description:
 *      This function undoes the mapping that ->map() provided.
 */
void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
                            struct pipe_buffer *buf, void *map_data)
{
        if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
                buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
                kunmap_atomic(map_data);
        } else
                kunmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_unmap);

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to attempt to steal
 *
 * Description:
 *      This function attempts to steal the &struct page attached to
 *      @buf. If successful, this function returns 0 with the page
 *      locked. The caller may then reuse the page for whatever it
 *      wishes; the typical use is insertion into a different file
 *      page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
                           struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        /*
         * A reference count of one means that the owner of this page is
         * the only one holding a reference to it: lock the page and
         * return success.
         */
        if (page_count(page) == 1) {
                lock_page(page);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to get a reference to
 *
 * Description:
 *      This function grabs an extra reference to @buf. It's used in
 *      the tee() system call, when we duplicate the buffers in one
 *      pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
        page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:       the pipe that the buffer belongs to
 * @buf:        the buffer to confirm
 *
 * Description:
 *      This function does nothing, because the generic pipe code uses
 *      pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
                             struct pipe_buffer *buf)
{
        return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to put a reference to
 *
 * Description:
 *      This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
                              struct pipe_buffer *buf)
{
        page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
        .can_merge = 1,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

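/*
 * Identical to anon_pipe_buf_ops except for can_merge: packetized pipes
 * (opened with O_DIRECT) must keep each write in its own buffer so that
 * message boundaries are preserved.
 */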
static const struct pipe_buf_operations packet_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

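/*
 * The pipe is a ring of pipe->buffers (a power of two) pipe_buffers:
 * pipe->curbuf indexes the oldest non-empty slot, pipe->nrbufs counts
 * the occupied slots, and each buffer tracks the unconsumed region of
 * its page with an offset+len pair, so a partially read buffer keeps
 * its remaining data in place.
 */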
static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
           unsigned long nr_segs, loff_t pos)
{
        struct file *filp = iocb->ki_filp;
        struct pipe_inode_info *pipe = filp->private_data;
        int do_wakeup;
        ssize_t ret;
        struct iovec *iov = (struct iovec *)_iov;
        size_t total_len;

        total_len = iov_length(iov, nr_segs);
        /* Null read succeeds. */
        if (unlikely(total_len == 0))
                return 0;

        do_wakeup = 0;
        ret = 0;
        __pipe_lock(pipe);
        for (;;) {
                int bufs = pipe->nrbufs;
                if (bufs) {
                        int curbuf = pipe->curbuf;
                        struct pipe_buffer *buf = pipe->bufs + curbuf;
                        const struct pipe_buf_operations *ops = buf->ops;
                        void *addr;
                        size_t chars = buf->len, remaining;
                        int error, atomic;
                        int offset;

                        if (chars > total_len)
                                chars = total_len;

                        error = ops->confirm(pipe, buf);
                        if (error) {
                                if (!ret)
                                        ret = error;
                                break;
                        }

                        atomic = !iov_fault_in_pages_write(iov, chars);
                        remaining = chars;
                        offset = buf->offset;
redo:
                        addr = ops->map(pipe, buf, atomic);
                        error = pipe_iov_copy_to_user(iov, addr, &offset,
                                                      &remaining, atomic);
                        ops->unmap(pipe, buf, addr);
                        if (unlikely(error)) {
                                /*
                                 * Just retry with the slow path if we failed.
                                 */
                                if (atomic) {
                                        atomic = 0;
                                        goto redo;
                                }
                                if (!ret)
                                        ret = error;
                                break;
                        }
                        ret += chars;
                        buf->offset += chars;
                        buf->len -= chars;

                        /* Was it a packet buffer? Clean up and exit */
                        if (buf->flags & PIPE_BUF_FLAG_PACKET) {
                                total_len = chars;
                                buf->len = 0;
                        }

                        if (!buf->len) {
                                buf->ops = NULL;
                                ops->release(pipe, buf);
                                curbuf = (curbuf + 1) & (pipe->buffers - 1);
                                pipe->curbuf = curbuf;
                                pipe->nrbufs = --bufs;
                                do_wakeup = 1;
                        }
                        total_len -= chars;
                        if (!total_len)
                                break;  /* common path: read succeeded */
                }
                if (bufs)       /* More to do? */
                        continue;
                if (!pipe->writers)
                        break;
                if (!pipe->waiting_writers) {
                        /* syscall merging: Usually we must not sleep
                         * if O_NONBLOCK is set, or if we got some data.
                         * But if a writer sleeps in kernel space, then
                         * we can wait for that data without violating POSIX.
                         */
                        if (ret)
                                break;
                        if (filp->f_flags & O_NONBLOCK) {
                                ret = -EAGAIN;
                                break;
                        }
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }
                if (do_wakeup) {
                        wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                }
                pipe_wait(pipe);
        }
        __pipe_unlock(pipe);

        /* Signal writers asynchronously that there is more room. */
        if (do_wakeup) {
                wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        if (ret > 0)
                file_accessed(filp);
        return ret;
}

static inline int is_packetized(struct file *file)
{
        return (file->f_flags & O_DIRECT) != 0;
}

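/*
 * Writes first try to append to the partial page at the tail of the
 * ring (anonymous pipe buffers are mergeable), then fill freshly
 * allocated pages one at a time, sleeping whenever the ring is full
 * and O_NONBLOCK is not set.
 */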
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
            unsigned long nr_segs, loff_t ppos)
{
        struct file *filp = iocb->ki_filp;
        struct pipe_inode_info *pipe = filp->private_data;
        ssize_t ret;
        int do_wakeup;
        struct iovec *iov = (struct iovec *)_iov;
        size_t total_len;
        ssize_t chars;

        total_len = iov_length(iov, nr_segs);
        /* Null write succeeds. */
        if (unlikely(total_len == 0))
                return 0;

        do_wakeup = 0;
        ret = 0;
        __pipe_lock(pipe);

        if (!pipe->readers) {
                send_sig(SIGPIPE, current, 0);
                ret = -EPIPE;
                goto out;
        }

        /* We try to merge small writes */
        chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
        if (pipe->nrbufs && chars != 0) {
                int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
                                                        (pipe->buffers - 1);
                struct pipe_buffer *buf = pipe->bufs + lastbuf;
                const struct pipe_buf_operations *ops = buf->ops;
                int offset = buf->offset + buf->len;

                if (ops->can_merge && offset + chars <= PAGE_SIZE) {
                        int error, atomic = 1;
                        void *addr;
                        size_t remaining = chars;

                        error = ops->confirm(pipe, buf);
                        if (error)
                                goto out;

                        iov_fault_in_pages_read(iov, chars);
redo1:
                        addr = ops->map(pipe, buf, atomic);
                        error = pipe_iov_copy_from_user(addr, &offset, iov,
                                                        &remaining, atomic);
                        ops->unmap(pipe, buf, addr);
                        ret = error;
                        do_wakeup = 1;
                        if (error) {
                                if (atomic) {
                                        atomic = 0;
                                        goto redo1;
                                }
                                goto out;
                        }
                        buf->len += chars;
                        total_len -= chars;
                        ret = chars;
                        if (!total_len)
                                goto out;
                }
        }

        for (;;) {
                int bufs;

                if (!pipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }
                bufs = pipe->nrbufs;
                if (bufs < pipe->buffers) {
                        int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
                        struct pipe_buffer *buf = pipe->bufs + newbuf;
                        struct page *page = pipe->tmp_page;
                        char *src;
                        int error, atomic = 1;
                        int offset = 0;
                        size_t remaining;

                        if (!page) {
                                page = alloc_page(GFP_HIGHUSER);
                                if (unlikely(!page)) {
                                        ret = ret ? : -ENOMEM;
                                        break;
                                }
                                pipe->tmp_page = page;
                        }
                        /* Always wake up, even if the copy fails. Otherwise
                         * we lock up (O_NONBLOCK-)readers that sleep due to
                         * syscall merging.
                         * FIXME! Is this really true?
                         */
                        do_wakeup = 1;
                        chars = PAGE_SIZE;
                        if (chars > total_len)
                                chars = total_len;

                        iov_fault_in_pages_read(iov, chars);
                        remaining = chars;
redo2:
                        if (atomic)
                                src = kmap_atomic(page);
                        else
                                src = kmap(page);

                        error = pipe_iov_copy_from_user(src, &offset, iov,
                                                        &remaining, atomic);
                        if (atomic)
                                kunmap_atomic(src);
                        else
                                kunmap(page);

                        if (unlikely(error)) {
                                if (atomic) {
                                        atomic = 0;
                                        goto redo2;
                                }
                                if (!ret)
                                        ret = error;
                                break;
                        }
                        ret += chars;

                        /* Insert it into the buffer array */
                        buf->page = page;
                        buf->ops = &anon_pipe_buf_ops;
                        buf->offset = 0;
                        buf->len = chars;
                        buf->flags = 0;
                        if (is_packetized(filp)) {
                                buf->ops = &packet_pipe_buf_ops;
                                buf->flags = PIPE_BUF_FLAG_PACKET;
                        }
                        pipe->nrbufs = ++bufs;
                        pipe->tmp_page = NULL;

                        total_len -= chars;
                        if (!total_len)
                                break;
                }
                if (bufs < pipe->buffers)
                        continue;
                if (filp->f_flags & O_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }
                if (do_wakeup) {
                        wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }
                pipe->waiting_writers++;
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }
out:
        __pipe_unlock(pipe);
        if (do_wakeup) {
                wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }
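        /*
         * If anything was written, update the inode times. The
         * non-blocking sb_start_write_trylock() means a FIFO on a
         * frozen filesystem merely skips the time update instead of
         * blocking the writer.
         */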
        if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
                int err = file_update_time(filp);
                if (err)
                        ret = err;
                sb_end_write(file_inode(filp)->i_sb);
        }
        return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct pipe_inode_info *pipe = filp->private_data;
        int count, buf, nrbufs;

        switch (cmd) {
                case FIONREAD:
                        __pipe_lock(pipe);
                        count = 0;
                        buf = pipe->curbuf;
                        nrbufs = pipe->nrbufs;
                        while (--nrbufs >= 0) {
                                count += pipe->bufs[buf].len;
                                buf = (buf+1) & (pipe->buffers - 1);
                        }
                        __pipe_unlock(pipe);

                        return put_user(count, (int __user *)arg);
                default:
                        return -ENOIOCTLCMD;
        }
}

/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
        unsigned int mask;
        struct pipe_inode_info *pipe = filp->private_data;
        int nrbufs;

        poll_wait(filp, &pipe->wait, wait);

        /* Reading only -- no need for acquiring the semaphore.  */
        nrbufs = pipe->nrbufs;
        mask = 0;
        if (filp->f_mode & FMODE_READ) {
                mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
                if (!pipe->writers && filp->f_version != pipe->w_counter)
                        mask |= POLLHUP;
        }

        if (filp->f_mode & FMODE_WRITE) {
                mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
                /*
                 * Most Unices do not set POLLERR for FIFOs but on Linux they
                 * behave exactly like pipes for poll().
                 */
                if (!pipe->readers)
                        mask |= POLLERR;
        }

        return mask;
}

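/*
 * Drop one reference to the pipe held via a struct file; the last
 * closer (pipe->files reaching zero under inode->i_lock) detaches the
 * pipe from the inode and frees it.
 */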
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
        int kill = 0;

        spin_lock(&inode->i_lock);
        if (!--pipe->files) {
                inode->i_pipe = NULL;
                kill = 1;
        }
        spin_unlock(&inode->i_lock);

        if (kill)
                free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
        struct pipe_inode_info *pipe = file->private_data;

        __pipe_lock(pipe);
        if (file->f_mode & FMODE_READ)
                pipe->readers--;
        if (file->f_mode & FMODE_WRITE)
                pipe->writers--;

        if (pipe->readers || pipe->writers) {
                wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        __pipe_unlock(pipe);

        put_pipe_info(inode, pipe);
        return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
        struct pipe_inode_info *pipe = filp->private_data;
        int retval = 0;

        __pipe_lock(pipe);
        if (filp->f_mode & FMODE_READ)
                retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
        if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
                retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
                if (retval < 0 && (filp->f_mode & FMODE_READ))
                        /* this can only happen if on is true */
                        fasync_helper(-1, filp, 0, &pipe->fasync_readers);
        }
        __pipe_unlock(pipe);
        return retval;
}

static void account_pipe_buffers(struct pipe_inode_info *pipe,
                                 unsigned long old, unsigned long new)
{
        atomic_long_add(new - old, &pipe->user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(struct user_struct *user)
{
        return pipe_user_pages_soft &&
               atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
}

static bool too_many_pipe_buffers_hard(struct user_struct *user)
{
        return pipe_user_pages_hard &&
               atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
}

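/*
 * Allocate and account a new pipe. A user over the soft page limit is
 * restricted to a single buffer; a user over the hard limit gets no
 * pipe at all (the buffer-array allocation is skipped, so pipe->bufs
 * stays NULL and the function returns NULL).
 */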
struct pipe_inode_info *alloc_pipe_info(void)
{
        struct pipe_inode_info *pipe;

        pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
        if (pipe) {
                unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
                struct user_struct *user = get_current_user();

                if (!too_many_pipe_buffers_hard(user)) {
                        if (too_many_pipe_buffers_soft(user))
                                pipe_bufs = 1;
                        pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
                }

                if (pipe->bufs) {
                        init_waitqueue_head(&pipe->wait);
                        pipe->r_counter = pipe->w_counter = 1;
                        pipe->buffers = pipe_bufs;
                        pipe->user = user;
                        account_pipe_buffers(pipe, 0, pipe_bufs);
                        mutex_init(&pipe->mutex);
                        return pipe;
                }
                free_uid(user);
                kfree(pipe);
        }

        return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
        int i;

        account_pipe_buffers(pipe, pipe->buffers, 0);
        free_uid(pipe->user);
        for (i = 0; i < pipe->buffers; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;
                if (buf->ops)
                        buf->ops->release(pipe, buf);
        }
        if (pipe->tmp_page)
                __free_page(pipe->tmp_page);
        kfree(pipe->bufs);
        kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
        return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
                                dentry->d_inode->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
        .d_dname        = pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
        struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
        struct pipe_inode_info *pipe;

        if (!inode)
                goto fail_inode;

        inode->i_ino = get_next_ino();

        pipe = alloc_pipe_info();
        if (!pipe)
                goto fail_iput;

        inode->i_pipe = pipe;
        pipe->files = 2;
        pipe->readers = pipe->writers = 1;
        inode->i_fop = &pipefifo_fops;

        /*
         * Mark the inode dirty from the very beginning,
         * that way it will never be moved to the dirty
         * list because "mark_inode_dirty()" will think
         * that it already _is_ on the dirty list.
         */
        inode->i_state = I_DIRTY;
        inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

        return inode;

fail_iput:
        iput(inode);

fail_inode:
        return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
        int err;
        struct inode *inode = get_pipe_inode();
        struct file *f;
        struct path path;
        static struct qstr name = { .name = "" };

        if (!inode)
                return -ENFILE;

        err = -ENOMEM;
        path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
        if (!path.dentry)
                goto err_inode;
        path.mnt = mntget(pipe_mnt);

        d_instantiate(path.dentry, inode);

        err = -ENFILE;
        f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
        if (IS_ERR(f))
                goto err_dentry;

        f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
        f->private_data = inode->i_pipe;

        res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
        if (IS_ERR(res[0]))
                goto err_file;

        path_get(&path);
        res[0]->private_data = inode->i_pipe;
        res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
        res[1] = f;
        return 0;

err_file:
        put_filp(f);
err_dentry:
        free_pipe_info(inode->i_pipe);
        path_put(&path);
        return err;

err_inode:
        free_pipe_info(inode->i_pipe);
        iput(inode);
        return err;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
        int error;
        int fdw, fdr;

        if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
                return -EINVAL;

        error = create_pipe_files(files, flags);
        if (error)
                return error;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                goto err_read_pipe;
        fdr = error;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                goto err_fdr;
        fdw = error;

        audit_fd_pair(fdr, fdw);
        fd[0] = fdr;
        fd[1] = fdw;
        return 0;

 err_fdr:
        put_unused_fd(fdr);
 err_read_pipe:
        fput(files[0]);
        fput(files[1]);
        return error;
}

int do_pipe_flags(int *fd, int flags)
{
        struct file *files[2];
        int error = __do_pipe_flags(fd, files, flags);
        if (!error) {
                fd_install(fd[0], files[0]);
                fd_install(fd[1], files[1]);
        }
        return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
        struct file *files[2];
        int fd[2];
        int error;

        error = __do_pipe_flags(fd, files, flags);
        if (!error) {
                if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
                        fput(files[0]);
                        fput(files[1]);
                        put_unused_fd(fd[0]);
                        put_unused_fd(fd[1]);
                        error = -EFAULT;
                } else {
                        fd_install(fd[0], files[0]);
                        fd_install(fd[1], files[1]);
                }
        }
        return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
        return sys_pipe2(fildes, 0);
}

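/*
 * Each open bumps r_counter or w_counter, so a FIFO opener can sleep
 * until the counter of the other side changes, i.e. until at least one
 * new partner has shown up since we started waiting.
 */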
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
        int cur = *cnt;

        while (cur == *cnt) {
                pipe_wait(pipe);
                if (signal_pending(current))
                        break;
        }
        return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
        wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
        struct pipe_inode_info *pipe;
        bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
        int ret;

        filp->f_version = 0;

        spin_lock(&inode->i_lock);
        if (inode->i_pipe) {
                pipe = inode->i_pipe;
                pipe->files++;
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                pipe = alloc_pipe_info();
                if (!pipe)
                        return -ENOMEM;
                pipe->files = 1;
                spin_lock(&inode->i_lock);
                if (unlikely(inode->i_pipe)) {
                        inode->i_pipe->files++;
                        spin_unlock(&inode->i_lock);
                        free_pipe_info(pipe);
                        pipe = inode->i_pipe;
                } else {
                        inode->i_pipe = pipe;
                        spin_unlock(&inode->i_lock);
                }
        }
        filp->private_data = pipe;
        /* OK, we have a pipe and it's pinned down */

        __pipe_lock(pipe);

        /* We can only do regular read/write on fifos */
        filp->f_mode &= (FMODE_READ | FMODE_WRITE);

        switch (filp->f_mode) {
        case FMODE_READ:
        /*
         *  O_RDONLY
         *  POSIX.1 says that O_NONBLOCK means return with the FIFO
         *  opened, even when there is no process writing the FIFO.
         */
                pipe->r_counter++;
                if (pipe->readers++ == 0)
                        wake_up_partner(pipe);

                if (!is_pipe && !pipe->writers) {
                        if ((filp->f_flags & O_NONBLOCK)) {
                                /* suppress POLLHUP until we have
                                 * seen a writer */
                                filp->f_version = pipe->w_counter;
                        } else {
                                if (wait_for_partner(pipe, &pipe->w_counter))
                                        goto err_rd;
                        }
                }
                break;

        case FMODE_WRITE:
        /*
         *  O_WRONLY
         *  POSIX.1 says that O_NONBLOCK means return -1 with
         *  errno=ENXIO when there is no process reading the FIFO.
         */
                ret = -ENXIO;
                if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
                        goto err;

                pipe->w_counter++;
                if (!pipe->writers++)
                        wake_up_partner(pipe);

                if (!is_pipe && !pipe->readers) {
                        if (wait_for_partner(pipe, &pipe->r_counter))
                                goto err_wr;
                }
                break;

        case FMODE_READ | FMODE_WRITE:
        /*
         *  O_RDWR
         *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
         *  This implementation will NEVER block on an O_RDWR open, since
         *  the process can at least talk to itself.
         */

                pipe->readers++;
                pipe->writers++;
                pipe->r_counter++;
                pipe->w_counter++;
                if (pipe->readers == 1 || pipe->writers == 1)
                        wake_up_partner(pipe);
                break;

        default:
                ret = -EINVAL;
                goto err;
        }

        /* Ok! */
        __pipe_unlock(pipe);
        return 0;

err_rd:
        if (!--pipe->readers)
                wake_up_interruptible(&pipe->wait);
        ret = -ERESTARTSYS;
        goto err;

err_wr:
        if (!--pipe->writers)
                wake_up_interruptible(&pipe->wait);
        ret = -ERESTARTSYS;
        goto err;

err:
        __pipe_unlock(pipe);

        put_pipe_info(inode, pipe);
        return ret;
}

const struct file_operations pipefifo_fops = {
        .open           = fifo_open,
        .llseek         = no_llseek,
        .read           = do_sync_read,
        .aio_read       = pipe_read,
        .write          = do_sync_write,
        .aio_write      = pipe_write,
        .poll           = pipe_poll,
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
};

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns
 * the new pipe size in bytes on success, or a negative error code on
 * failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
        struct pipe_buffer *bufs;

        /*
         * We can shrink the pipe if nr_pages >= pipe->nrbufs. Since we
         * don't expect a lot of shrink+grow operations, just free and
         * allocate again like we would do for growing. If the pipe
         * currently contains more buffers than nr_pages, return busy.
         */
        if (nr_pages < pipe->nrbufs)
                return -EBUSY;

        bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
        if (unlikely(!bufs))
                return -ENOMEM;

        /*
         * The pipe array wraps around, so just start the new one at zero
         * and adjust the indexes.
         */
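        /*
         * Example: with pipe->buffers == 8, curbuf == 6 and nrbufs == 4,
         * tail = (6 + 4) & 7 = 2 and head = 4 - 2 = 2: slots 6..7 are
         * copied to the start of the new array, and slots 0..1 follow.
         */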
        if (pipe->nrbufs) {
                unsigned int tail;
                unsigned int head;

                tail = pipe->curbuf + pipe->nrbufs;
                if (tail < pipe->buffers)
                        tail = 0;
                else
                        tail &= (pipe->buffers - 1);

                head = pipe->nrbufs - tail;
                if (head)
                        memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
                if (tail)
                        memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
        }

        account_pipe_buffers(pipe, pipe->buffers, nr_pages);
        pipe->curbuf = 0;
        kfree(pipe->bufs);
        pipe->bufs = bufs;
        pipe->buffers = nr_pages;
        return nr_pages * PAGE_SIZE;
}

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
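/*
 * Example (assuming 4 KiB pages): a request of 70000 bytes needs 18
 * pages, which rounds up to 32 pages, so the pipe is sized to 131072
 * bytes.
 */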
static inline unsigned int round_pipe_size(unsigned int size)
{
        unsigned long nr_pages;

        nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}

/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
                 size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
        if (ret < 0 || !write)
                return ret;

        pipe_max_size = round_pipe_size(pipe_max_size);
        return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
        return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct pipe_inode_info *pipe;
        long ret;

        pipe = get_pipe_info(file);
        if (!pipe)
                return -EBADF;

        __pipe_lock(pipe);

        switch (cmd) {
        case F_SETPIPE_SZ: {
                unsigned int size, nr_pages;

                size = round_pipe_size(arg);
                nr_pages = size >> PAGE_SHIFT;

                ret = -EINVAL;
                if (!nr_pages)
                        goto out;

                if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
                        ret = -EPERM;
                        goto out;
                } else if ((too_many_pipe_buffers_hard(pipe->user) ||
                            too_many_pipe_buffers_soft(pipe->user)) &&
                           !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                ret = pipe_set_size(pipe, nr_pages);
                break;
                }
        case F_GETPIPE_SZ:
                ret = pipe->buffers * PAGE_SIZE;
                break;
        default:
                ret = -EINVAL;
                break;
        }

out:
        __pipe_unlock(pipe);
        return ret;
}

static const struct super_operations pipefs_ops = {
        .destroy_inode = free_inode_nonrcu,
        .statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, and no real gain from having the whole thing mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
                         int flags, const char *dev_name, void *data)
{
        return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
                        &pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
        .name           = "pipefs",
        .mount          = pipefs_mount,
        .kill_sb        = kill_anon_super,
};

static int __init init_pipe_fs(void)
{
        int err = register_filesystem(&pipe_fs_type);

        if (!err) {
                pipe_mnt = kern_mount(&pipe_fs_type);
                if (IS_ERR(pipe_mnt)) {
                        err = PTR_ERR(pipe_mnt);
                        unregister_filesystem(&pipe_fs_type);
                }
        }
        return err;
}

fs_initcall(init_pipe_fs);