TOMOYO Linux Cross Reference
Linux/fs/file.c

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
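
/*
 * (Added commentary.) I.e. the hard upper bound is the smaller of
 * INT_MAX and the number of file pointers whose array still fits in a
 * size_t, rounded down to a multiple of BITS_PER_LONG; since
 * BITS_PER_LONG is a power of two, "& -BITS_PER_LONG" just clears the
 * low bits.
 */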

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

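/*
 * (Added commentary.) BITBIT_NR/BITBIT_SIZE size the second-level
 * bitmap: one bit per word of open_fds.  Illustrative 64-bit example:
 * nr = 4096 fds -> BITS_TO_LONGS(4096) = 64 words of open_fds ->
 * BITS_TO_LONGS(64) = 1 long, i.e. BITBIT_SIZE(4096) = 8 bytes.
 */
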
/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

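	/*
	 * Worked example of the sizing above (added commentary; assumes
	 * 64-bit pointers, i.e. 128 slots per 1024 bytes): a request for
	 * nr = 300 gives 300 / 128 = 2, roundup_pow_of_two(2 + 1) = 4,
	 * and 4 * 128 = 512 slots, i.e. a 4 KiB fd array.  512 is also a
	 * multiple of BITS_PER_LONG, as the bitmap code requires.
	 */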
	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all __fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}
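
/*
 * Resize protocol in brief (added commentary, not from the original
 * file): lookups read files->fdt under RCU, and __fd_install() runs
 * lock-free inside rcu_read_lock_sched() unless it observes
 * resize_in_progress.  expand_fdtable() therefore publishes the new
 * table with rcu_assign_pointer(), frees the old one only after a grace
 * period via call_rcu(), and uses synchronize_rcu() above to wait out
 * any installer that started before resize_in_progress was set (only
 * needed when the table is shared, hence the files->count check).
 */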

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
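
/*
 * (Added commentary.) Note that the result is rounded up to a full word
 * and is at least BITS_PER_LONG even with no fds open.  dup_fd() below
 * uses it only as an upper bound on what to copy, so over-counting by
 * up to BITS_PER_LONG - 1 slots is harmless.
 */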

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a fresh pointer to its fd
		 * table; it may have grown a new, bigger table while we
		 * were unlocked, and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
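
/*
 * How the two-level search above pays off (added commentary,
 * illustrative numbers): full_fds_bits carries one bit per
 * BITS_PER_LONG-fd word of open_fds, set by __set_open_fd() when a word
 * fills up.  With 64-bit longs and fds 0..63 all open, bit 0 of
 * full_fds_bits is set, so the first find_next_zero_bit() skips that
 * entire word without scanning it and the search for a free fd
 * effectively starts at fd 64.
 */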

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * NOTE: the __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by a truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */

void __fd_install(struct files_struct *files, unsigned int fd,
		struct file *file)
{
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);
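
/*
 * Typical in-kernel usage of the pair above (an illustrative sketch,
 * not part of the original file; some_file_constructor() is a
 * hypothetical helper): reserve the descriptor first, then bind the
 * struct file to it only once nothing else can fail, since fd_install()
 * cannot be undone:
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = some_file_constructor();
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */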

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
EXPORT_SYMBOL(__close_fd); /* for ksys_close() */

/*
 * variant of __close_fd that gets a ref on the file for later fput
 */
int __close_fd_get_file(unsigned int fd, struct file **res)
{
	struct files_struct *files = current->files;
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	get_file(file);
	*res = file;
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	*res = NULL;
	return -ENOENT;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken.
		 * dup2() atomicity guarantee is the reason
		 * we loop to catch the new file (or NULL pointer)
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu_many(file, refs))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
	return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask, 1);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}
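
/*
 * Note on the encoding above (added commentary): struct file is aligned
 * well past 4 bytes, so the two low bits of the pointer are always
 * zero.  __fget_light() and __fdget_pos() use them to smuggle
 * FDPUT_FPUT and FDPUT_POS_UNLOCK back to the caller, and (v & ~3)
 * recovers the pointer; fdput()/fdput_pos() act on those bits.
 */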

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD's "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}
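
/*
 * Userspace view of the syscalls above (an illustrative sketch, not
 * part of the original file): the classic shell-style redirection of
 * stdout into a file relies on dup2() atomically closing and reusing
 * fd 1:
 *
 *	int fd = open("out.log", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	if (fd >= 0 && fd != STDOUT_FILENO) {
 *		dup2(fd, STDOUT_FILENO);	// fd 1 now refers to out.log
 *		close(fd);
 *	}
 */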

int ksys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	return ksys_dup(fildes);
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
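
/*
 * Illustrative iterate_fd() callback (a hypothetical sketch, not from
 * this file): count how many of a task's files are open for writing.
 * A non-zero return from the callback stops the iteration.
 *
 *	static int count_writable(const void *p, struct file *file, unsigned n)
 *	{
 *		if (file->f_mode & FMODE_WRITE)
 *			(*(unsigned int *)p)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_writable = 0;
 *	iterate_fd(current->files, 0, count_writable, &nr_writable);
 */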
