TOMOYO Linux Cross Reference
Linux/fs/eventpoll.c


  1 /*
  2  *  fs/eventpoll.c (Efficient event retrieval implementation)
  3  *  Copyright (C) 2001,...,2009  Davide Libenzi
  4  *
  5  *  This program is free software; you can redistribute it and/or modify
  6  *  it under the terms of the GNU General Public License as published by
  7  *  the Free Software Foundation; either version 2 of the License, or
  8  *  (at your option) any later version.
  9  *
 10  *  Davide Libenzi <davidel@xmailserver.org>
 11  *
 12  */
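/*
 * Illustrative userspace sketch (not part of this file): the interface
 * implemented below, in its most common form. epoll_create1(), epoll_ctl()
 * and epoll_wait() are the public syscalls; "watch_fd" is a hypothetical
 * helper name used only for this example.
 */
#if 0
#include <sys/epoll.h>

static int watch_fd(int fd)
{
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
	struct epoll_event ready[16];
	int epfd = epoll_create1(0);

	if (epfd < 0 || epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0)
		return -1;
	/* Blocks until at least one watched fd reports an event */
	return epoll_wait(epfd, ready, 16, -1);
}
#endif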
 13 
 14 #include <linux/init.h>
 15 #include <linux/kernel.h>
 16 #include <linux/sched/signal.h>
 17 #include <linux/fs.h>
 18 #include <linux/file.h>
 19 #include <linux/signal.h>
 20 #include <linux/errno.h>
 21 #include <linux/mm.h>
 22 #include <linux/slab.h>
 23 #include <linux/poll.h>
 24 #include <linux/string.h>
 25 #include <linux/list.h>
 26 #include <linux/hash.h>
 27 #include <linux/spinlock.h>
 28 #include <linux/syscalls.h>
 29 #include <linux/rbtree.h>
 30 #include <linux/wait.h>
 31 #include <linux/eventpoll.h>
 32 #include <linux/mount.h>
 33 #include <linux/bitops.h>
 34 #include <linux/mutex.h>
 35 #include <linux/anon_inodes.h>
 36 #include <linux/device.h>
 37 #include <linux/uaccess.h>
 38 #include <asm/io.h>
 39 #include <asm/mman.h>
 40 #include <linux/atomic.h>
 41 #include <linux/proc_fs.h>
 42 #include <linux/seq_file.h>
 43 #include <linux/compat.h>
 44 #include <linux/rculist.h>
 45 #include <net/busy_poll.h>
 46 
 47 /*
 48  * LOCKING:
 49  * There are three levels of locking required by epoll:
 50  *
 51  * 1) epmutex (mutex)
 52  * 2) ep->mtx (mutex)
 53  * 3) ep->lock (spinlock)
 54  *
 55  * The acquire order is the one listed above, from 1 to 3.
 56  * We need a spinlock (ep->lock) because we manipulate objects
 57  * from inside the poll callback, which might be triggered from
 58  * a wake_up() that in turn might be called from IRQ context.
 59  * So we can't sleep inside the poll callback and hence we need
 60  * a spinlock. During the event transfer loop (from kernel to
 61  * user space) we could end up sleeping due to a copy_to_user(), so
 62  * we need a lock that will allow us to sleep. This lock is a
 63  * mutex (ep->mtx). It is acquired during the event transfer loop,
 64  * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 65  * Then we also need a global mutex to serialize eventpoll_release_file()
 66  * and ep_free().
 67  * This mutex is acquired by ep_free() during the epoll file
 68  * cleanup path and it is also acquired by eventpoll_release_file()
 69  * if a file has been pushed inside an epoll set and it is then
 70  * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 71  * It is also acquired when inserting an epoll fd onto another epoll
 72  * fd. We do this so that we walk the epoll tree and ensure that this
 73  * insertion does not create a cycle of epoll file descriptors, which
 74  * could lead to deadlock. We need a global mutex to prevent two
 75  * simultaneous inserts (A into B and B into A) from racing and
 76  * constructing a cycle without either insert observing that it is
 77  * about to do so.
 78  * It is necessary to acquire multiple "ep->mtx"es at once in the
 79  * case when one epoll fd is added to another. In this case, we
 80  * always acquire the locks in the order of nesting (i.e. after
 81  * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
 82  * before e2->mtx). Since we disallow cycles of epoll file
 83  * descriptors, this ensures that the mutexes are well-ordered. In
 84  * order to communicate this nesting to lockdep, when walking a tree
 85  * of epoll file descriptors, we use the current recursion depth as
 86  * the lockdep subkey.
 87  * It is possible to drop the "ep->mtx" and to use the global
 88  * mutex "epmutex" (together with "ep->lock") to make it work,
 89  * but having "ep->mtx" will make the interface more scalable.
 90  * Events that require holding "epmutex" are very rare, while for
 91  * normal operations the epoll private "ep->mtx" guarantees
 92  * better scalability.
 93  */
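/*
 * A minimal sketch of the acquisition order described above (illustrative
 * only, not a real call site in this file; "ep" and "flags" stand in for
 * locals of an actual caller):
 */
#if 0
	mutex_lock(&epmutex);			/* 1) global mutex, rare paths */
	mutex_lock_nested(&ep->mtx, 0);		/* 2) per-instance mutex */
	spin_lock_irqsave(&ep->lock, flags);	/* 3) per-instance spinlock */
	/* ... manipulate ep->rdllist / ep->ovflist ... */
	spin_unlock_irqrestore(&ep->lock, flags);
	mutex_unlock(&ep->mtx);
	mutex_unlock(&epmutex);
#endif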
 94 
 95 /* Epoll private bits inside the event mask */
 96 #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
 97 
 98 #define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)
 99 
100 #define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
101                                 EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
102 
103 /* Maximum number of nesting levels allowed inside epoll sets */
104 #define EP_MAX_NESTS 4
105 
106 #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
107 
108 #define EP_UNACTIVE_PTR ((void *) -1L)
109 
110 #define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
111 
112 struct epoll_filefd {
113         struct file *file;
114         int fd;
115 } __packed;
116 
117 /*
118  * Structure used to track possible nested calls, to detect overly deep
119  * recursion and loop cycles.
120  */
121 struct nested_call_node {
122         struct list_head llink;
123         void *cookie;
124         void *ctx;
125 };
126 
127 /*
128  * This structure is used as a collector for nested calls, to check for
129  * maximum recursion depth and loop cycles.
130  */
131 struct nested_calls {
132         struct list_head tasks_call_list;
133         spinlock_t lock;
134 };
135 
136 /*
137  * Each file descriptor added to the eventpoll interface will
138  * have an entry of this type linked to the "rbr" RB tree.
139  * Avoid increasing the size of this struct, there can be many thousands
140  * of these on a server and we do not want this to take another cache line.
141  */
142 struct epitem {
143         union {
144                 /* RB tree node links this structure to the eventpoll RB tree */
145                 struct rb_node rbn;
146                 /* Used to free the struct epitem */
147                 struct rcu_head rcu;
148         };
149 
150         /* List header used to link this structure to the eventpoll ready list */
151         struct list_head rdllink;
152 
153         /*
154          * Works together with "struct eventpoll"->ovflist in keeping the
155          * singly linked chain of items.
156          */
157         struct epitem *next;
158 
159         /* The file descriptor information this item refers to */
160         struct epoll_filefd ffd;
161 
162         /* Number of active wait queues attached to poll operations */
163         int nwait;
164 
165         /* List containing poll wait queues */
166         struct list_head pwqlist;
167 
168         /* The "container" of this item */
169         struct eventpoll *ep;
170 
171         /* List header used to link this item to the "struct file" items list */
172         struct list_head fllink;
173 
174         /* wakeup_source used when EPOLLWAKEUP is set */
175         struct wakeup_source __rcu *ws;
176 
177         /* The structure that describes the interested events and the source fd */
178         struct epoll_event event;
179 };
180 
181 /*
182  * This structure is stored inside the "private_data" member of the file
183  * structure and represents the main data structure for the eventpoll
184  * interface.
185  */
186 struct eventpoll {
187         /* Protect the access to this structure */
188         spinlock_t lock;
189 
190         /*
191          * This mutex is used to ensure that files are not removed
192          * while epoll is using them. This is held during the event
193          * collection loop, the file cleanup path, the epoll file exit
194          * code and the ctl operations.
195          */
196         struct mutex mtx;
197 
198         /* Wait queue used by sys_epoll_wait() */
199         wait_queue_head_t wq;
200 
201         /* Wait queue used by file->poll() */
202         wait_queue_head_t poll_wait;
203 
204         /* List of ready file descriptors */
205         struct list_head rdllist;
206 
207         /* RB tree root used to store monitored fd structs */
208         struct rb_root_cached rbr;
209 
210         /*
211          * This is a singly linked list that chains all the "struct epitem"
212          * that had events occur while ready events were being transferred
213          * to userspace without holding ->lock.
214          */
215         struct epitem *ovflist;
216 
217         /* wakeup_source used when ep_scan_ready_list is running */
218         struct wakeup_source *ws;
219 
220         /* The user that created the eventpoll descriptor */
221         struct user_struct *user;
222 
223         struct file *file;
224 
225         /* used to optimize loop detection check */
226         int visited;
227         struct list_head visited_list_link;
228 
229 #ifdef CONFIG_NET_RX_BUSY_POLL
230         /* used to track busy poll napi_id */
231         unsigned int napi_id;
232 #endif
233 };
234 
235 /* Wait structure used by the poll hooks */
236 struct eppoll_entry {
237         /* List header used to link this structure to the "struct epitem" */
238         struct list_head llink;
239 
240         /* The "base" pointer is set to the container "struct epitem" */
241         struct epitem *base;
242 
243         /*
244          * Wait queue item that will be linked to the target file wait
245          * queue head.
246          */
247         wait_queue_entry_t wait;
248 
250         /* The wait queue head to which the "wait" item is linked */
250         wait_queue_head_t *whead;
251 };
252 
253 /* Wrapper struct used by poll queueing */
254 struct ep_pqueue {
255         poll_table pt;
256         struct epitem *epi;
257 };
258 
259 /* Used by the ep_send_events() function as callback private data */
260 struct ep_send_events_data {
261         int maxevents;
262         struct epoll_event __user *events;
263         int res;
264 };
265 
266 /*
267  * Configuration options available inside /proc/sys/fs/epoll/
268  */
269 /* Maximum number of epoll watched descriptors, per user */
270 static long max_user_watches __read_mostly;
271 
272 /*
273  * This mutex is used to serialize ep_free() and eventpoll_release_file().
274  */
275 static DEFINE_MUTEX(epmutex);
276 
277 /* Used to check for epoll file descriptor inclusion loops */
278 static struct nested_calls poll_loop_ncalls;
279 
280 /* Slab cache used to allocate "struct epitem" */
281 static struct kmem_cache *epi_cache __read_mostly;
282 
283 /* Slab cache used to allocate "struct eppoll_entry" */
284 static struct kmem_cache *pwq_cache __read_mostly;
285 
286 /* Visited nodes during ep_loop_check(), so we can unset them when we finish */
287 static LIST_HEAD(visited_list);
288 
289 /*
290  * List of files with newly added links, where we may need to limit the number
291  * of emanating paths. Protected by the epmutex.
292  */
293 static LIST_HEAD(tfile_check_list);
294 
295 #ifdef CONFIG_SYSCTL
296 
297 #include <linux/sysctl.h>
298 
299 static long zero;
300 static long long_max = LONG_MAX;
301 
302 struct ctl_table epoll_table[] = {
303         {
304                 .procname       = "max_user_watches",
305                 .data           = &max_user_watches,
306                 .maxlen         = sizeof(max_user_watches),
307                 .mode           = 0644,
308                 .proc_handler   = proc_doulongvec_minmax,
309                 .extra1         = &zero,
310                 .extra2         = &long_max,
311         },
312         { }
313 };
314 #endif /* CONFIG_SYSCTL */
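/*
 * Illustrative userspace sketch (not part of this file): reading the limit
 * exposed by the sysctl table above. The path follows from the epoll sysctl
 * directory and the "max_user_watches" procname registered here.
 */
#if 0
#include <stdio.h>

static long read_max_user_watches(void)
{
	long watches = -1;
	FILE *f = fopen("/proc/sys/fs/epoll/max_user_watches", "r");

	if (f) {
		if (fscanf(f, "%ld", &watches) != 1)
			watches = -1;
		fclose(f);
	}
	return watches;
}
#endif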
315 
316 static const struct file_operations eventpoll_fops;
317 
318 static inline int is_file_epoll(struct file *f)
319 {
320         return f->f_op == &eventpoll_fops;
321 }
322 
323 /* Setup the structure that is used as key for the RB tree */
324 static inline void ep_set_ffd(struct epoll_filefd *ffd,
325                               struct file *file, int fd)
326 {
327         ffd->file = file;
328         ffd->fd = fd;
329 }
330 
331 /* Compare RB tree keys */
332 static inline int ep_cmp_ffd(struct epoll_filefd *p1,
333                              struct epoll_filefd *p2)
334 {
335         return (p1->file > p2->file ? +1:
336                 (p1->file < p2->file ? -1 : p1->fd - p2->fd));
337 }
338 
339 /* Tells us if the item is currently linked */
340 static inline int ep_is_linked(struct list_head *p)
341 {
342         return !list_empty(p);
343 }
344 
345 static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
346 {
347         return container_of(p, struct eppoll_entry, wait);
348 }
349 
350 /* Get the "struct epitem" from a wait queue pointer */
351 static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
352 {
353         return container_of(p, struct eppoll_entry, wait)->base;
354 }
355 
356 /* Get the "struct epitem" from an epoll queue wrapper */
357 static inline struct epitem *ep_item_from_epqueue(poll_table *p)
358 {
359         return container_of(p, struct ep_pqueue, pt)->epi;
360 }
361 
362 /* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
363 static inline int ep_op_has_event(int op)
364 {
365         return op != EPOLL_CTL_DEL;
366 }
367 
368 /* Initialize the poll safe wake up structure */
369 static void ep_nested_calls_init(struct nested_calls *ncalls)
370 {
371         INIT_LIST_HEAD(&ncalls->tasks_call_list);
372         spin_lock_init(&ncalls->lock);
373 }
374 
375 /**
376  * ep_events_available - Checks if ready events might be available.
377  *
378  * @ep: Pointer to the eventpoll context.
379  *
380  * Returns: Returns a value different from zero if ready events are available,
381  *          or zero otherwise.
382  */
383 static inline int ep_events_available(struct eventpoll *ep)
384 {
385         return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
386 }
387 
388 #ifdef CONFIG_NET_RX_BUSY_POLL
389 static bool ep_busy_loop_end(void *p, unsigned long start_time)
390 {
391         struct eventpoll *ep = p;
392 
393         return ep_events_available(ep) || busy_loop_timeout(start_time);
394 }
395 #endif /* CONFIG_NET_RX_BUSY_POLL */
396 
397 /*
398  * Busy poll if busy polling is globally enabled, a supporting socket has
399  * been found, and no events are ready; the busy loop returns on
400  * need_resched() or once ep_events_available() is true.
401  * We must do our busy polling with IRQs enabled.
402  */
403 static void ep_busy_loop(struct eventpoll *ep, int nonblock)
404 {
405 #ifdef CONFIG_NET_RX_BUSY_POLL
406         unsigned int napi_id = READ_ONCE(ep->napi_id);
407 
408         if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on())
409                 napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep);
410 #endif
411 }
412 
413 static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
414 {
415 #ifdef CONFIG_NET_RX_BUSY_POLL
416         if (ep->napi_id)
417                 ep->napi_id = 0;
418 #endif
419 }
420 
421 /*
422  * Set epoll busy poll NAPI ID from sk.
423  */
424 static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
425 {
426 #ifdef CONFIG_NET_RX_BUSY_POLL
427         struct eventpoll *ep;
428         unsigned int napi_id;
429         struct socket *sock;
430         struct sock *sk;
431         int err;
432 
433         if (!net_busy_loop_on())
434                 return;
435 
436         sock = sock_from_file(epi->ffd.file, &err);
437         if (!sock)
438                 return;
439 
440         sk = sock->sk;
441         if (!sk)
442                 return;
443 
444         napi_id = READ_ONCE(sk->sk_napi_id);
445         ep = epi->ep;
446 
447         /*
448          * Reject non-NAPI IDs; nothing to do if we already
449          * have this ID.
450          */
451         if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
452                 return;
453 
454         /* record NAPI ID for use in next busy poll */
455         ep->napi_id = napi_id;
456 #endif
457 }
458 
459 /**
460  * ep_call_nested - Perform a bounded (possibly) nested call, by checking
461  *                  that the recursion limit is not exceeded, and that
462  *                  the same nested call (identified by its cookie) is
463  *                  not re-entered.
464  *
465  * @ncalls: Pointer to the nested_calls structure to be used for this call.
466  * @max_nests: Maximum number of allowed nesting calls.
467  * @nproc: Nested call core function pointer.
468  * @priv: Opaque data to be passed to the @nproc callback.
469  * @cookie: Cookie to be used to identify this nested call.
470  * @ctx: This instance context.
471  *
472  * Returns: Returns the code returned by the @nproc callback, or -1 if
473  *          the maximum recursion limit has been exceeded.
474  */
475 static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
476                           int (*nproc)(void *, void *, int), void *priv,
477                           void *cookie, void *ctx)
478 {
479         int error, call_nests = 0;
480         unsigned long flags;
481         struct list_head *lsthead = &ncalls->tasks_call_list;
482         struct nested_call_node *tncur;
483         struct nested_call_node tnode;
484 
485         spin_lock_irqsave(&ncalls->lock, flags);
486 
487         /*
488          * Try to see if the current task is already inside this wakeup call.
489          * We use a list here, since the number of elements in this set is
490          * always very small.
491          */
492         list_for_each_entry(tncur, lsthead, llink) {
493                 if (tncur->ctx == ctx &&
494                     (tncur->cookie == cookie || ++call_nests > max_nests)) {
495                         /*
496                          * Oops ... loop detected or maximum nest level reached.
497                          * We abort this wake by breaking the cycle itself.
498                          */
499                         error = -1;
500                         goto out_unlock;
501                 }
502         }
503 
504         /* Add the current task and cookie to the list */
505         tnode.ctx = ctx;
506         tnode.cookie = cookie;
507         list_add(&tnode.llink, lsthead);
508 
509         spin_unlock_irqrestore(&ncalls->lock, flags);
510 
511         /* Call the nested function */
512         error = (*nproc)(priv, cookie, call_nests);
513 
514         /* Remove the current task from the list */
515         spin_lock_irqsave(&ncalls->lock, flags);
516         list_del(&tnode.llink);
517 out_unlock:
518         spin_unlock_irqrestore(&ncalls->lock, flags);
519 
520         return error;
521 }
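/*
 * Illustrative sketch (not part of this file): how the helper above is
 * typically invoked, mirroring the real callers further down. "my_proc"
 * and "priv" are hypothetical names used only for this example.
 */
#if 0
	int err = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
				 my_proc, priv, cookie, current);
	if (err == -1)
		/* loop detected or more than EP_MAX_NESTS nesting levels */;
#endif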
522 
523 /*
524  * As described in commit 0ccf831cb ("lockdep: annotate epoll"),
525  * the use of wait queues by epoll is done in a very controlled
526  * manner. Wake ups can nest inside each other, but are never done
527  * with the same locking. For example:
528  *
529  *   dfd = socket(...);
530  *   efd1 = epoll_create();
531  *   efd2 = epoll_create();
532  *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
533  *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
534  *
535  * When a packet arrives at the device underneath "dfd", the net code will
536  * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
537  * callback wakeup entry on that queue, and the wake_up() performed by the
538  * "dfd" net code will end up in ep_poll_callback(). At this point epoll
539  * (efd1) notices that it may have some event ready, so it needs to wake up
540  * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
541  * that ends up in another wake_up(), after having checked the
542  * recursion constraints. That is, no more than EP_MAX_NESTS nested
543  * wakeups are allowed, to avoid stack blasting.
544  *
545  * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
546  * this special case of epoll.
547  */
548 #ifdef CONFIG_DEBUG_LOCK_ALLOC
549 
550 static struct nested_calls poll_safewake_ncalls;
551 
552 static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
553 {
554         unsigned long flags;
555         wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie;
556 
557         spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1);
558         wake_up_locked_poll(wqueue, EPOLLIN);
559         spin_unlock_irqrestore(&wqueue->lock, flags);
560 
561         return 0;
562 }
563 
564 static void ep_poll_safewake(wait_queue_head_t *wq)
565 {
566         int this_cpu = get_cpu();
567 
568         ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
569                        ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
570 
571         put_cpu();
572 }
573 
574 #else
575 
576 static void ep_poll_safewake(wait_queue_head_t *wq)
577 {
578         wake_up_poll(wq, EPOLLIN);
579 }
580 
581 #endif
582 
583 static void ep_remove_wait_queue(struct eppoll_entry *pwq)
584 {
585         wait_queue_head_t *whead;
586 
587         rcu_read_lock();
588         /*
589          * If it is cleared by POLLFREE, it should be rcu-safe.
590          * If we read NULL we need a barrier paired with
591          * smp_store_release() in ep_poll_callback(), otherwise
592          * we rely on whead->lock.
593          */
594         whead = smp_load_acquire(&pwq->whead);
595         if (whead)
596                 remove_wait_queue(whead, &pwq->wait);
597         rcu_read_unlock();
598 }
599 
600 /*
601  * This function unregisters poll callbacks from the associated file
602  * descriptor.  Must be called with "mtx" held (or "epmutex" if called from
603  * ep_free).
604  */
605 static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
606 {
607         struct list_head *lsthead = &epi->pwqlist;
608         struct eppoll_entry *pwq;
609 
610         while (!list_empty(lsthead)) {
611                 pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
612 
613                 list_del(&pwq->llink);
614                 ep_remove_wait_queue(pwq);
615                 kmem_cache_free(pwq_cache, pwq);
616         }
617 }
618 
619 /* call only when ep->mtx is held */
620 static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
621 {
622         return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
623 }
624 
625 /* call only when ep->mtx is held */
626 static inline void ep_pm_stay_awake(struct epitem *epi)
627 {
628         struct wakeup_source *ws = ep_wakeup_source(epi);
629 
630         if (ws)
631                 __pm_stay_awake(ws);
632 }
633 
634 static inline bool ep_has_wakeup_source(struct epitem *epi)
635 {
636         return rcu_access_pointer(epi->ws) ? true : false;
637 }
638 
639 /* call when ep->mtx cannot be held (ep_poll_callback) */
640 static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
641 {
642         struct wakeup_source *ws;
643 
644         rcu_read_lock();
645         ws = rcu_dereference(epi->ws);
646         if (ws)
647                 __pm_stay_awake(ws);
648         rcu_read_unlock();
649 }
650 
651 /**
652  * ep_scan_ready_list - Scans the ready list in a way that makes it possible
653  *                      for the scan code to call f_op->poll(). Also allows
654  *                      for O(NumReady) performance.
655  *
656  * @ep: Pointer to the epoll private data structure.
657  * @sproc: Pointer to the scan callback.
658  * @priv: Private opaque data passed to the @sproc callback.
659  * @depth: The current depth of recursive f_op->poll calls.
660  * @ep_locked: caller already holds ep->mtx
661  *
662  * Returns: The same integer error code returned by the @sproc callback.
663  */
664 static __poll_t ep_scan_ready_list(struct eventpoll *ep,
665                               __poll_t (*sproc)(struct eventpoll *,
666                                            struct list_head *, void *),
667                               void *priv, int depth, bool ep_locked)
668 {
669         __poll_t res;
670         int pwake = 0;
671         unsigned long flags;
672         struct epitem *epi, *nepi;
673         LIST_HEAD(txlist);
674 
675         /*
676          * We need to lock this because we could be hit by
677          * eventpoll_release_file() and epoll_ctl().
678          */
679 
680         if (!ep_locked)
681                 mutex_lock_nested(&ep->mtx, depth);
682 
683         /*
684          * Steal the ready list, and re-init the original one to the
685          * empty list. Also, set ep->ovflist to NULL so that events
686          * happening while looping without locks are not lost. We cannot
687          * have the poll callback queue directly on ep->rdllist,
688          * because we want the "sproc" callback to be able to do it
689          * in a lockless way.
690          */
691         spin_lock_irqsave(&ep->lock, flags);
692         list_splice_init(&ep->rdllist, &txlist);
693         ep->ovflist = NULL;
694         spin_unlock_irqrestore(&ep->lock, flags);
695 
696         /*
697          * Now call the callback function.
698          */
699         res = (*sproc)(ep, &txlist, priv);
700 
701         spin_lock_irqsave(&ep->lock, flags);
702         /*
703          * During the time we spent inside the "sproc" callback, some
704          * other events might have been queued by the poll callback.
705          * We re-insert them inside the main ready-list here.
706          */
707         for (nepi = ep->ovflist; (epi = nepi) != NULL;
708              nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
709                 /*
710                  * We need to check if the item is already in the list.
711                  * During the "sproc" callback execution time, items are
712                  * queued into ->ovflist but the "txlist" might already
713                  * contain them, and the list_splice() below takes care of them.
714                  */
715                 if (!ep_is_linked(&epi->rdllink)) {
716                         list_add_tail(&epi->rdllink, &ep->rdllist);
717                         ep_pm_stay_awake(epi);
718                 }
719         }
720         /*
721          * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
722          * releasing the lock, events will be queued in the normal way inside
723          * ep->rdllist.
724          */
725         ep->ovflist = EP_UNACTIVE_PTR;
726 
727         /*
728          * Quickly re-inject items left on "txlist".
729          */
730         list_splice(&txlist, &ep->rdllist);
731         __pm_relax(ep->ws);
732 
733         if (!list_empty(&ep->rdllist)) {
734                 /*
735                  * Wake up (if active) both the eventpoll wait list and
736                  * the ->poll() wait list (delayed after we release the lock).
737                  */
738                 if (waitqueue_active(&ep->wq))
739                         wake_up_locked(&ep->wq);
740                 if (waitqueue_active(&ep->poll_wait))
741                         pwake++;
742         }
743         spin_unlock_irqrestore(&ep->lock, flags);
744 
745         if (!ep_locked)
746                 mutex_unlock(&ep->mtx);
747 
748         /* We have to call this outside the lock */
749         if (pwake)
750                 ep_poll_safewake(&ep->poll_wait);
751 
752         return res;
753 }
754 
755 static void epi_rcu_free(struct rcu_head *head)
756 {
757         struct epitem *epi = container_of(head, struct epitem, rcu);
758         kmem_cache_free(epi_cache, epi);
759 }
760 
761 /*
762  * Removes a "struct epitem" from the eventpoll RB tree and deallocates
763  * all the associated resources. Must be called with "mtx" held.
764  */
765 static int ep_remove(struct eventpoll *ep, struct epitem *epi)
766 {
767         unsigned long flags;
768         struct file *file = epi->ffd.file;
769 
770         /*
771          * Removes poll wait queue hooks. We _have_ to do this without holding
772          * the "ep->lock" otherwise a deadlock might occur. This because of the
773          * sequence of the lock acquisition. Here we do "ep->lock" then the wait
774          * queue head lock when unregistering the wait queue. The wakeup callback
775          * will run by holding the wait queue head lock and will call our callback
776          * that will try to get "ep->lock".
777          */
778         ep_unregister_pollwait(ep, epi);
779 
780         /* Remove the current item from the list of epoll hooks */
781         spin_lock(&file->f_lock);
782         list_del_rcu(&epi->fllink);
783         spin_unlock(&file->f_lock);
784 
785         rb_erase_cached(&epi->rbn, &ep->rbr);
786 
787         spin_lock_irqsave(&ep->lock, flags);
788         if (ep_is_linked(&epi->rdllink))
789                 list_del_init(&epi->rdllink);
790         spin_unlock_irqrestore(&ep->lock, flags);
791 
792         wakeup_source_unregister(ep_wakeup_source(epi));
793         /*
794          * At this point it is safe to free the eventpoll item. Use the union
795          * field epi->rcu, since we are trying to minimize the size of
796          * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
797          * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
798          * use of the rbn field.
799          */
800         call_rcu(&epi->rcu, epi_rcu_free);
801 
802         atomic_long_dec(&ep->user->epoll_watches);
803 
804         return 0;
805 }
806 
807 static void ep_free(struct eventpoll *ep)
808 {
809         struct rb_node *rbp;
810         struct epitem *epi;
811 
812         /* We need to release all tasks waiting on this file */
813         if (waitqueue_active(&ep->poll_wait))
814                 ep_poll_safewake(&ep->poll_wait);
815 
816         /*
817          * We need to lock this because we could be hit by
818          * eventpoll_release_file() while we're freeing the "struct eventpoll".
819          * We do not need to hold "ep->mtx" here because the epoll file
820          * is on the way to be removed and no one has references to it
821          * anymore. The only hit might come from eventpoll_release_file() but
822          * holding "epmutex" is sufficient here.
823          */
824         mutex_lock(&epmutex);
825 
826         /*
827          * Walks through the whole tree, unregistering the poll callbacks.
828          */
829         for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
830                 epi = rb_entry(rbp, struct epitem, rbn);
831 
832                 ep_unregister_pollwait(ep, epi);
833                 cond_resched();
834         }
835 
836         /*
837          * Walks through the whole tree, freeing each "struct epitem". At this
838          * point we are sure no poll callbacks will be lingering around, and also by
839          * holding "epmutex" we can be sure that no file cleanup code will hit
840          * us during this operation. So we can avoid the lock on "ep->lock".
841          * We do not need to lock ep->mtx either; we only do it to prevent
842          * a lockdep warning.
843          */
844         mutex_lock(&ep->mtx);
845         while ((rbp = rb_first_cached(&ep->rbr)) != NULL) {
846                 epi = rb_entry(rbp, struct epitem, rbn);
847                 ep_remove(ep, epi);
848                 cond_resched();
849         }
850         mutex_unlock(&ep->mtx);
851 
852         mutex_unlock(&epmutex);
853         mutex_destroy(&ep->mtx);
854         free_uid(ep->user);
855         wakeup_source_unregister(ep->ws);
856         kfree(ep);
857 }
858 
859 static int ep_eventpoll_release(struct inode *inode, struct file *file)
860 {
861         struct eventpoll *ep = file->private_data;
862 
863         if (ep)
864                 ep_free(ep);
865 
866         return 0;
867 }
868 
869 static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
870                                void *priv);
871 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
872                                  poll_table *pt);
873 
874 /*
875  * Differs from ep_eventpoll_poll() in that internal callers already have
876  * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
877  * is correctly annotated.
878  */
879 static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
880                                  int depth)
881 {
882         struct eventpoll *ep;
883         bool locked;
884 
885         pt->_key = epi->event.events;
886         if (!is_file_epoll(epi->ffd.file))
887                 return vfs_poll(epi->ffd.file, pt) & epi->event.events;
888 
889         ep = epi->ffd.file->private_data;
890         poll_wait(epi->ffd.file, &ep->poll_wait, pt);
891         locked = pt && (pt->_qproc == ep_ptable_queue_proc);
892 
893         return ep_scan_ready_list(epi->ffd.file->private_data,
894                                   ep_read_events_proc, &depth, depth,
895                                   locked) & epi->event.events;
896 }
897 
898 static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
899                                void *priv)
900 {
901         struct epitem *epi, *tmp;
902         poll_table pt;
903         int depth = *(int *)priv;
904 
905         init_poll_funcptr(&pt, NULL);
906         depth++;
907 
908         list_for_each_entry_safe(epi, tmp, head, rdllink) {
909                 if (ep_item_poll(epi, &pt, depth)) {
910                         return EPOLLIN | EPOLLRDNORM;
911                 } else {
912                         /*
913                          * Item has been dropped into the ready list by the poll
914                          * callback, but it's not actually ready, as far as the
915                          * caller-requested events are concerned. We can remove it here.
916                          */
917                         __pm_relax(ep_wakeup_source(epi));
918                         list_del_init(&epi->rdllink);
919                 }
920         }
921 
922         return 0;
923 }
924 
925 static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
926 {
927         struct eventpoll *ep = file->private_data;
928         int depth = 0;
929 
930         /* Insert inside our poll wait queue */
931         poll_wait(file, &ep->poll_wait, wait);
932 
933         /*
934          * Proceed to find out if wanted events are really available inside
935          * the ready list.
936          */
937         return ep_scan_ready_list(ep, ep_read_events_proc,
938                                   &depth, depth, false);
939 }
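/*
 * Illustrative userspace sketch (not part of this file): because of the
 * f_op->poll hook above, an epoll fd is itself pollable, e.g. via poll(2).
 * "epfd" stands in for a descriptor obtained from epoll_create1().
 */
#if 0
#include <poll.h>

	struct pollfd pfd = { .fd = epfd, .events = POLLIN };

	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
		/* epoll_wait(epfd, ...) will now report events */;
#endif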
940 
941 #ifdef CONFIG_PROC_FS
942 static void ep_show_fdinfo(struct seq_file *m, struct file *f)
943 {
944         struct eventpoll *ep = f->private_data;
945         struct rb_node *rbp;
946 
947         mutex_lock(&ep->mtx);
948         for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
949                 struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
950                 struct inode *inode = file_inode(epi->ffd.file);
951 
952                 seq_printf(m, "tfd: %8d events: %8x data: %16llx "
953                            " pos:%lli ino:%lx sdev:%x\n",
954                            epi->ffd.fd, epi->event.events,
955                            (long long)epi->event.data,
956                            (long long)epi->ffd.file->f_pos,
957                            inode->i_ino, inode->i_sb->s_dev);
958                 if (seq_has_overflowed(m))
959                         break;
960         }
961         mutex_unlock(&ep->mtx);
962 }
963 #endif
964 
965 /* File callbacks that implement the eventpoll file behaviour */
966 static const struct file_operations eventpoll_fops = {
967 #ifdef CONFIG_PROC_FS
968         .show_fdinfo    = ep_show_fdinfo,
969 #endif
970         .release        = ep_eventpoll_release,
971         .poll           = ep_eventpoll_poll,
972         .llseek         = noop_llseek,
973 };
974 
975 /*
976  * This is called from eventpoll_release() to unlink files from the eventpoll
977  * interface. We need this facility to correctly clean up files that are
978  * closed without being removed from the eventpoll interface.
979  */
980 void eventpoll_release_file(struct file *file)
981 {
982         struct eventpoll *ep;
983         struct epitem *epi, *next;
984 
985         /*
986          * We don't want to get "file->f_lock" because it is not
987          * necessary. It is not necessary because we're in the "struct file"
988          * cleanup path, and this means that no one is using this file anymore.
989          * So, for example, epoll_ctl() cannot hit here since if we reach this
990          * point, the file counter already went to zero and fget() would fail.
991  * The only hit might come from ep_free(), but holding the mutex
992  * will correctly serialize the operation. We do need to acquire
993          * "ep->mtx" after "epmutex" because ep_remove() requires it when called
994          * from anywhere but ep_free().
995          *
996          * Besides, ep_remove() acquires the lock, so we can't hold it here.
997          */
998         mutex_lock(&epmutex);
999         list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
1000                 ep = epi->ep;
1001                 mutex_lock_nested(&ep->mtx, 0);
1002                 ep_remove(ep, epi);
1003                 mutex_unlock(&ep->mtx);
1004         }
1005         mutex_unlock(&epmutex);
1006 }
1007 
1008 static int ep_alloc(struct eventpoll **pep)
1009 {
1010         int error;
1011         struct user_struct *user;
1012         struct eventpoll *ep;
1013 
1014         user = get_current_user();
1015         error = -ENOMEM;
1016         ep = kzalloc(sizeof(*ep), GFP_KERNEL);
1017         if (unlikely(!ep))
1018                 goto free_uid;
1019 
1020         spin_lock_init(&ep->lock);
1021         mutex_init(&ep->mtx);
1022         init_waitqueue_head(&ep->wq);
1023         init_waitqueue_head(&ep->poll_wait);
1024         INIT_LIST_HEAD(&ep->rdllist);
1025         ep->rbr = RB_ROOT_CACHED;
1026         ep->ovflist = EP_UNACTIVE_PTR;
1027         ep->user = user;
1028 
1029         *pep = ep;
1030 
1031         return 0;
1032 
1033 free_uid:
1034         free_uid(user);
1035         return error;
1036 }
1037 
1038 /*
1039  * Search the file inside the eventpoll tree. The RB tree operations
1040  * are protected by the "mtx" mutex, and ep_find() must be called with
1041  * "mtx" held.
1042  */
1043 static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
1044 {
1045         int kcmp;
1046         struct rb_node *rbp;
1047         struct epitem *epi, *epir = NULL;
1048         struct epoll_filefd ffd;
1049 
1050         ep_set_ffd(&ffd, file, fd);
1051         for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
1052                 epi = rb_entry(rbp, struct epitem, rbn);
1053                 kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
1054                 if (kcmp > 0)
1055                         rbp = rbp->rb_right;
1056                 else if (kcmp < 0)
1057                         rbp = rbp->rb_left;
1058                 else {
1059                         epir = epi;
1060                         break;
1061                 }
1062         }
1063 
1064         return epir;
1065 }
1066 
1067 #ifdef CONFIG_CHECKPOINT_RESTORE
1068 static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
1069 {
1070         struct rb_node *rbp;
1071         struct epitem *epi;
1072 
1073         for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1074                 epi = rb_entry(rbp, struct epitem, rbn);
1075                 if (epi->ffd.fd == tfd) {
1076                         if (toff == 0)
1077                                 return epi;
1078                         else
1079                                 toff--;
1080                 }
1081                 cond_resched();
1082         }
1083 
1084         return NULL;
1085 }
1086 
1087 struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
1088                                      unsigned long toff)
1089 {
1090         struct file *file_raw;
1091         struct eventpoll *ep;
1092         struct epitem *epi;
1093 
1094         if (!is_file_epoll(file))
1095                 return ERR_PTR(-EINVAL);
1096 
1097         ep = file->private_data;
1098 
1099         mutex_lock(&ep->mtx);
1100         epi = ep_find_tfd(ep, tfd, toff);
1101         if (epi)
1102                 file_raw = epi->ffd.file;
1103         else
1104                 file_raw = ERR_PTR(-ENOENT);
1105         mutex_unlock(&ep->mtx);
1106 
1107         return file_raw;
1108 }
1109 #endif /* CONFIG_CHECKPOINT_RESTORE */
1110 
1111 /*
1112  * This is the callback that is passed to the wait queue wakeup
1113  * mechanism. It is called by the stored file descriptors when they
1114  * have events to report.
1115  */
1116 static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
1117 {
1118         int pwake = 0;
1119         unsigned long flags;
1120         struct epitem *epi = ep_item_from_wait(wait);
1121         struct eventpoll *ep = epi->ep;
1122         __poll_t pollflags = key_to_poll(key);
1123         int ewake = 0;
1124 
1125         spin_lock_irqsave(&ep->lock, flags);
1126 
1127         ep_set_busy_poll_napi_id(epi);
1128 
1129         /*
1130          * If the event mask does not contain any poll(2) event, we consider the
1131          * descriptor to be disabled. This condition is likely the effect of the
1132          * EPOLLONESHOT bit that disables the descriptor when an event is received,
1133          * until the next EPOLL_CTL_MOD is issued.
1134          */
1135         if (!(epi->event.events & ~EP_PRIVATE_BITS))
1136                 goto out_unlock;
1137 
1138         /*
1139          * Check the events coming with the callback. At this stage, not
1140          * every device reports the events in the "key" parameter of the
1141          * callback. We need to be able to handle both cases here, hence the
1142          * test for "key" != NULL before the event match test.
1143          */
1144         if (pollflags && !(pollflags & epi->event.events))
1145                 goto out_unlock;
1146 
1147         /*
1148          * If we are transferring events to userspace, we can hold no locks
1149          * (because we're accessing user memory, and because of linux f_op->poll()
1150          * semantics). All the events that happen during that period of time are
1151          * chained in ep->ovflist and requeued later on.
1152          */
1153         if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
1154                 if (epi->next == EP_UNACTIVE_PTR) {
1155                         epi->next = ep->ovflist;
1156                         ep->ovflist = epi;
1157                         if (epi->ws) {
1158                                 /*
1159                                  * Activate ep->ws since epi->ws may get
1160                                  * deactivated at any time.
1161                                  */
1162                                 __pm_stay_awake(ep->ws);
1163                         }
1164 
1165                 }
1166                 goto out_unlock;
1167         }
1168 
1169         /* If this file is already in the ready list we exit soon */
1170         if (!ep_is_linked(&epi->rdllink)) {
1171                 list_add_tail(&epi->rdllink, &ep->rdllist);
1172                 ep_pm_stay_awake_rcu(epi);
1173         }
1174 
1175         /*
1176          * Wake up (if active) both the eventpoll wait list and the ->poll()
1177          * wait list.
1178          */
1179         if (waitqueue_active(&ep->wq)) {
1180                 if ((epi->event.events & EPOLLEXCLUSIVE) &&
1181                                         !(pollflags & POLLFREE)) {
1182                         switch (pollflags & EPOLLINOUT_BITS) {
1183                         case EPOLLIN:
1184                                 if (epi->event.events & EPOLLIN)
1185                                         ewake = 1;
1186                                 break;
1187                         case EPOLLOUT:
1188                                 if (epi->event.events & EPOLLOUT)
1189                                         ewake = 1;
1190                                 break;
1191                         case 0:
1192                                 ewake = 1;
1193                                 break;
1194                         }
1195                 }
1196                 wake_up_locked(&ep->wq);
1197         }
1198         if (waitqueue_active(&ep->poll_wait))
1199                 pwake++;
1200 
1201 out_unlock:
1202         spin_unlock_irqrestore(&ep->lock, flags);
1203 
1204         /* We have to call this outside the lock */
1205         if (pwake)
1206                 ep_poll_safewake(&ep->poll_wait);
1207 
1208         if (!(epi->event.events & EPOLLEXCLUSIVE))
1209                 ewake = 1;
1210 
1211         if (pollflags & POLLFREE) {
1212                 /*
1213                  * If we race with ep_remove_wait_queue() it can miss
1214                  * ->whead = NULL and do another remove_wait_queue() after
1215                  * us, so we can't use __remove_wait_queue().
1216                  */
1217                 list_del_init(&wait->entry);
1218                 /*
1219                  * ->whead != NULL protects us from the race with ep_free()
1220                  * or ep_remove(), ep_remove_wait_queue() takes whead->lock
1221                  * held by the caller. Once we nullify it, nothing protects
1222                  * ep/epi or even wait.
1223                  */
1224                 smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
1225         }
1226 
1227         return ewake;
1228 }
1229 
1230 /*
1231  * This is the callback that is used to add our wait queue to the
1232  * target file wakeup lists.
1233  */
1234 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
1235                                  poll_table *pt)
1236 {
1237         struct epitem *epi = ep_item_from_epqueue(pt);
1238         struct eppoll_entry *pwq;
1239 
1240         if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
1241                 init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
1242                 pwq->whead = whead;
1243                 pwq->base = epi;
1244                 if (epi->event.events & EPOLLEXCLUSIVE)
1245                         add_wait_queue_exclusive(whead, &pwq->wait);
1246                 else
1247                         add_wait_queue(whead, &pwq->wait);
1248                 list_add_tail(&pwq->llink, &epi->pwqlist);
1249                 epi->nwait++;
1250         } else {
1251                 /* We have to signal that an error occurred */
1252                 epi->nwait = -1;
1253         }
1254 }
1255 
1256 static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
1257 {
1258         int kcmp;
1259         struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
1260         struct epitem *epic;
1261         bool leftmost = true;
1262 
1263         while (*p) {
1264                 parent = *p;
1265                 epic = rb_entry(parent, struct epitem, rbn);
1266                 kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
1267                 if (kcmp > 0) {
1268                         p = &parent->rb_right;
1269                         leftmost = false;
1270                 } else
1271                         p = &parent->rb_left;
1272         }
1273         rb_link_node(&epi->rbn, parent, p);
1274         rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
1275 }
1276 
1277 
1278 
1279 #define PATH_ARR_SIZE 5
1280 /*
1281  * These are the numbers of paths of length 1 to 5 that we allow to emanate
1282  * from a single file of interest. For example, we allow 1000 paths of length
1283  * 1 to emanate from each file of interest. This essentially represents the
1284  * potential wakeup paths, which need to be limited in order to avoid massive
1285  * uncontrolled wakeup storms. The common use case should be a single ep which
1286  * is connected to n file sources. In this case each file source has 1 path
1287  * of length 1. Thus, the numbers below should be more than sufficient. These
1288  * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
1289  * and delete can't add additional paths. Protected by the epmutex.
1290  */
1291 static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
1292 static int path_count[PATH_ARR_SIZE];
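/*
 * Worked example (illustrative): given the limits above, and since
 * path_count_inc() below never limits depth-1 paths, a single file of
 * interest is bounded to 500 + 100 + 50 + 10 = 660 wakeup paths of
 * lengths 2 through 5, plus an arbitrary number of length-1 paths.
 */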
1293 
1294 static int path_count_inc(int nests)
1295 {
1296         /* Allow an arbitrary number of depth 1 paths */
1297         if (nests == 0)
1298                 return 0;
1299 
1300         if (++path_count[nests] > path_limits[nests])
1301                 return -1;
1302         return 0;
1303 }
1304 
1305 static void path_count_init(void)
1306 {
1307         int i;
1308 
1309         for (i = 0; i < PATH_ARR_SIZE; i++)
1310                 path_count[i] = 0;
1311 }
1312 
1313 static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
1314 {
1315         int error = 0;
1316         struct file *file = priv;
1317         struct file *child_file;
1318         struct epitem *epi;
1319 
1320         /* CTL_DEL can remove links here, but that can't increase our count */
1321         rcu_read_lock();
1322         list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
1323                 child_file = epi->ep->file;
1324                 if (is_file_epoll(child_file)) {
1325                         if (list_empty(&child_file->f_ep_links)) {
1326                                 if (path_count_inc(call_nests)) {
1327                                         error = -1;
1328                                         break;
1329                                 }
1330                         } else {
1331                                 error = ep_call_nested(&poll_loop_ncalls,
1332                                                         EP_MAX_NESTS,
1333                                                         reverse_path_check_proc,
1334                                                         child_file, child_file,
1335                                                         current);
1336                         }
1337                         if (error != 0)
1338                                 break;
1339                 } else {
1340                         printk(KERN_ERR "reverse_path_check_proc: "
1341                                 "file is not an ep!\n");
1342                 }
1343         }
1344         rcu_read_unlock();
1345         return error;
1346 }
1347 
1348 /**
1349  * reverse_path_check - The tfile_check_list is a list of file *, which have
1350  *                      links that are proposed to be newly added. We need to
1351  *                      make sure that those added links don't add too many
1352  *                      paths such that we will spend all our time waking up
1353  *                      eventpoll objects.
1354  *
1355  * Returns: Returns zero if the proposed links don't create too many paths,
1356  *          -1 otherwise.
1357  */
1358 static int reverse_path_check(void)
1359 {
1360         int error = 0;
1361         struct file *current_file;
1362 
1363         /* let's call this for all tfiles */
1364         list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
1365                 path_count_init();
1366                 error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1367                                         reverse_path_check_proc, current_file,
1368                                         current_file, current);
1369                 if (error)
1370                         break;
1371         }
1372         return error;
1373 }
1374 
1375 static int ep_create_wakeup_source(struct epitem *epi)
1376 {
1377         const char *name;
1378         struct wakeup_source *ws;
1379 
1380         if (!epi->ep->ws) {
1381                 epi->ep->ws = wakeup_source_register("eventpoll");
1382                 if (!epi->ep->ws)
1383                         return -ENOMEM;
1384         }
1385 
1386         name = epi->ffd.file->f_path.dentry->d_name.name;
1387         ws = wakeup_source_register(name);
1388 
1389         if (!ws)
1390                 return -ENOMEM;
1391         rcu_assign_pointer(epi->ws, ws);
1392 
1393         return 0;
1394 }
1395 
1396 /* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
1397 static noinline void ep_destroy_wakeup_source(struct epitem *epi)
1398 {
1399         struct wakeup_source *ws = ep_wakeup_source(epi);
1400 
1401         RCU_INIT_POINTER(epi->ws, NULL);
1402 
1403         /*
1404          * wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
1405          * used internally by wakeup_source_remove, too (called by
1406          * wakeup_source_unregister), so we cannot use call_rcu
1407          */
1408         synchronize_rcu();
1409         wakeup_source_unregister(ws);
1410 }
1411 
1412 /*
1413  * Must be called with "mtx" held.
1414  */
1415 static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
1416                      struct file *tfile, int fd, int full_check)
1417 {
1418         int error, pwake = 0;
1419         __poll_t revents;
1420         unsigned long flags;
1421         long user_watches;
1422         struct epitem *epi;
1423         struct ep_pqueue epq;
1424 
1425         user_watches = atomic_long_read(&ep->user->epoll_watches);
1426         if (unlikely(user_watches >= max_user_watches))
1427                 return -ENOSPC;
1428         if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
1429                 return -ENOMEM;
1430 
1431         /* Item initialization follows here ... */
1432         INIT_LIST_HEAD(&epi->rdllink);
1433         INIT_LIST_HEAD(&epi->fllink);
1434         INIT_LIST_HEAD(&epi->pwqlist);
1435         epi->ep = ep;
1436         ep_set_ffd(&epi->ffd, tfile, fd);
1437         epi->event = *event;
1438         epi->nwait = 0;
1439         epi->next = EP_UNACTIVE_PTR;
1440         if (epi->event.events & EPOLLWAKEUP) {
1441                 error = ep_create_wakeup_source(epi);
1442                 if (error)
1443                         goto error_create_wakeup_source;
1444         } else {
1445                 RCU_INIT_POINTER(epi->ws, NULL);
1446         }
1447 
1448         /* Initialize the poll table using the queue callback */
1449         epq.epi = epi;
1450         init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
1451 
1452         /*
1453          * Attach the item to the poll hooks and get current event bits.
1454          * We can safely use the file* here because its usage count has
1455          * been increased by the caller of this function. Note that after
1456          * this operation completes, the poll callback can start hitting
1457          * the new item.
1458          */
1459         revents = ep_item_poll(epi, &epq.pt, 1);
1460 
1461         /*
1462          * We have to check if something went wrong during the poll wait queue
1463          * install process. Namely, an allocation for a wait queue failed due
1464          * to high memory pressure.
1465          */
1466         error = -ENOMEM;
1467         if (epi->nwait < 0)
1468                 goto error_unregister;
1469 
1470         /* Add the current item to the list of active epoll hooks for this file */
1471         spin_lock(&tfile->f_lock);
1472         list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
1473         spin_unlock(&tfile->f_lock);
1474 
1475         /*
1476          * Add the current item to the RB tree. All RB tree operations are
1477          * protected by "mtx", and ep_insert() is called with "mtx" held.
1478          */
1479         ep_rbtree_insert(ep, epi);
1480 
1481         /* now check if we've created too many backpaths */
1482         error = -EINVAL;
1483         if (full_check && reverse_path_check())
1484                 goto error_remove_epi;
1485 
1486         /* We have to drop the new item inside our item list to keep track of it */
1487         spin_lock_irqsave(&ep->lock, flags);
1488 
1489         /* record NAPI ID of new item if present */
1490         ep_set_busy_poll_napi_id(epi);
1491 
1492         /* If the file is already "ready" we drop it inside the ready list */
1493         if (revents && !ep_is_linked(&epi->rdllink)) {
1494                 list_add_tail(&epi->rdllink, &ep->rdllist);
1495                 ep_pm_stay_awake(epi);
1496 
1497                 /* Notify waiting tasks that events are available */
1498                 if (waitqueue_active(&ep->wq))
1499                         wake_up_locked(&ep->wq);
1500                 if (waitqueue_active(&ep->poll_wait))
1501                         pwake++;
1502         }
1503 
1504         spin_unlock_irqrestore(&ep->lock, flags);
1505 
1506         atomic_long_inc(&ep->user->epoll_watches);
1507 
1508         /* We have to call this outside the lock */
1509         if (pwake)
1510                 ep_poll_safewake(&ep->poll_wait);
1511 
1512         return 0;
1513 
1514 error_remove_epi:
1515         spin_lock(&tfile->f_lock);
1516         list_del_rcu(&epi->fllink);
1517         spin_unlock(&tfile->f_lock);
1518 
1519         rb_erase_cached(&epi->rbn, &ep->rbr);
1520 
1521 error_unregister:
1522         ep_unregister_pollwait(ep, epi);
1523 
1524         /*
1525          * We need to do this because an event could have arrived on some
1526          * allocated wait queue. Note that we don't care about the ep->ovflist
1527          * list, since that is used/cleaned only inside a section bound by "mtx".
1528          * And ep_insert() is called with "mtx" held.
1529          */
1530         spin_lock_irqsave(&ep->lock, flags);
1531         if (ep_is_linked(&epi->rdllink))
1532                 list_del_init(&epi->rdllink);
1533         spin_unlock_irqrestore(&ep->lock, flags);
1534 
1535         wakeup_source_unregister(ep_wakeup_source(epi));
1536 
1537 error_create_wakeup_source:
1538         kmem_cache_free(epi_cache, epi);
1539 
1540         return error;
1541 }
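
/*
 * Example (editorial sketch, user space, not part of this file): ep_insert()
 * above is the kernel-side service of an EPOLL_CTL_ADD request such as:
 *
 *	struct epoll_event ev = {
 *		.events = EPOLLIN,
 *		.data.fd = sock_fd,
 *	};
 *	if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock_fd, &ev) == -1)
 *		perror("epoll_ctl(ADD)");
 *
 * Adding the same fd twice fails with EEXIST (see the epoll_ctl handler
 * below); exceeding the per-user max_user_watches limit fails with ENOSPC
 * (see the check at the top of ep_insert()).
 */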
1542 
1543 /*
1544  * Modify the interest event mask by dropping an event if the new mask
1545  * has a match in the current file status. Must be called with "mtx" held.
1546  */
1547 static int ep_modify(struct eventpoll *ep, struct epitem *epi,
1548                      const struct epoll_event *event)
1549 {
1550         int pwake = 0;
1551         poll_table pt;
1552 
1553         init_poll_funcptr(&pt, NULL);
1554 
1555         /*
1556          * Set the new event interest mask before calling f_op->poll();
1557          * otherwise we might miss an event that happens between the
1558          * f_op->poll() call and the registration of the new event set.
1559          */
1560         epi->event.events = event->events; /* need barrier below */
1561         epi->event.data = event->data; /* protected by mtx */
1562         if (epi->event.events & EPOLLWAKEUP) {
1563                 if (!ep_has_wakeup_source(epi))
1564                         ep_create_wakeup_source(epi);
1565         } else if (ep_has_wakeup_source(epi)) {
1566                 ep_destroy_wakeup_source(epi);
1567         }
1568 
1569         /*
1570          * The following barrier has two effects:
1571          *
1572          * 1) Flush epi changes above to other CPUs.  This ensures
1573          *    we do not miss events from ep_poll_callback if an
1574          *    event occurs immediately after we call f_op->poll().
1575          *    We need this because we did not take ep->lock while
1576          *    changing epi above (but ep_poll_callback does take
1577          *    ep->lock).
1578          *
1579          * 2) We also need to ensure we do not miss _past_ events
1580          *    when calling f_op->poll().  This barrier also
1581          *    pairs with the barrier in wq_has_sleeper (see
1582          *    comments for wq_has_sleeper).
1583          *
1584          * This barrier will now guarantee ep_poll_callback or f_op->poll
1585          * (or both) will notice the readiness of an item.
1586          */
1587         smp_mb();
1588 
1589         /*
1590          * Get current event bits. We can safely use the file* here because
1591          * its usage count has been increased by the caller of this function.
1592          * If the item is "hot" and it is not registered inside the ready
1593          * list, push it inside.
1594          */
1595         if (ep_item_poll(epi, &pt, 1)) {
1596                 spin_lock_irq(&ep->lock);
1597                 if (!ep_is_linked(&epi->rdllink)) {
1598                         list_add_tail(&epi->rdllink, &ep->rdllist);
1599                         ep_pm_stay_awake(epi);
1600 
1601                         /* Notify waiting tasks that events are available */
1602                         if (waitqueue_active(&ep->wq))
1603                                 wake_up_locked(&ep->wq);
1604                         if (waitqueue_active(&ep->poll_wait))
1605                                 pwake++;
1606                 }
1607                 spin_unlock_irq(&ep->lock);
1608         }
1609 
1610         /* We have to call this outside the lock */
1611         if (pwake)
1612                 ep_poll_safewake(&ep->poll_wait);
1613 
1614         return 0;
1615 }
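
/*
 * Example (editorial sketch, user space): EPOLL_CTL_MOD, serviced by
 * ep_modify() above, is also how an EPOLLONESHOT item is re-armed after it
 * has been delivered and disarmed:
 *
 *	struct epoll_event ev = {
 *		.events = EPOLLIN | EPOLLONESHOT,
 *		.data.fd = sock_fd,
 *	};
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, sock_fd, &ev);	/* re-arm */
 */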
1616 
1617 static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1618                                void *priv)
1619 {
1620         struct ep_send_events_data *esed = priv;
1621         __poll_t revents;
1622         struct epitem *epi;
1623         struct epoll_event __user *uevent;
1624         struct wakeup_source *ws;
1625         poll_table pt;
1626 
1627         init_poll_funcptr(&pt, NULL);
1628 
1629         /*
1630          * We can loop without lock because we are passed a task private list.
1631          * Items cannot vanish during the loop because ep_scan_ready_list() is
1632          * holding "mtx" during this call.
1633          */
1634         for (esed->res = 0, uevent = esed->events;
1635              !list_empty(head) && esed->res < esed->maxevents;) {
1636                 epi = list_first_entry(head, struct epitem, rdllink);
1637 
1638                 /*
1639                  * Activate ep->ws before deactivating epi->ws to prevent
1640                  * triggering auto-suspend here (in case we reactivate epi->ws
1641                  * below).
1642                  *
1643                  * This could be rearranged to delay the deactivation of epi->ws
1644                  * instead, but then epi->ws would temporarily be out of sync
1645                  * with ep_is_linked().
1646                  */
1647                 ws = ep_wakeup_source(epi);
1648                 if (ws) {
1649                         if (ws->active)
1650                                 __pm_stay_awake(ep->ws);
1651                         __pm_relax(ws);
1652                 }
1653 
1654                 list_del_init(&epi->rdllink);
1655 
1656                 revents = ep_item_poll(epi, &pt, 1);
1657 
1658                 /*
1659                  * If the event mask intersects the caller-requested one,
1660                  * deliver the event to userspace. Again, ep_scan_ready_list()
1661                  * is holding "mtx", so no operations coming from userspace
1662                  * can change the item.
1663                  */
1664                 if (revents) {
1665                         if (__put_user(revents, &uevent->events) ||
1666                             __put_user(epi->event.data, &uevent->data)) {
1667                                 list_add(&epi->rdllink, head);
1668                                 ep_pm_stay_awake(epi);
1669                                 if (!esed->res)
1670                                         esed->res = -EFAULT;
1671                                 return 0;
1672                         }
1673                         esed->res++;
1674                         uevent++;
1675                         if (epi->event.events & EPOLLONESHOT)
1676                                 epi->event.events &= EP_PRIVATE_BITS;
1677                         else if (!(epi->event.events & EPOLLET)) {
1678                                 /*
1679                                  * If this file has been added with Level
1680                                  * Trigger mode, we need to insert back inside
1681                                  * the ready list, so that the next call to
1682                  * epoll_wait() will check the events'
1683                  * availability again. At this point, no one can insert
1684                                  * into ep->rdllist besides us. The epoll_ctl()
1685                                  * callers are locked out by
1686                                  * ep_scan_ready_list() holding "mtx" and the
1687                                  * poll callback will queue them in ep->ovflist.
1688                                  */
1689                                 list_add_tail(&epi->rdllink, &ep->rdllist);
1690                                 ep_pm_stay_awake(epi);
1691                         }
1692                 }
1693         }
1694 
1695         return 0;
1696 }
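
/*
 * Note (editorial sketch): the re-queueing above is what gives
 * level-triggered mode (the default) its semantics: an fd with unconsumed
 * input is reported by every epoll_wait() call, while an EPOLLET fd is
 * reported once per readiness transition. The two registrations:
 *
 *	ev.events = EPOLLIN;		/* LT: reported while data remains */
 *	ev.events = EPOLLIN | EPOLLET;	/* ET: reported on new data only */
 */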
1697 
1698 static int ep_send_events(struct eventpoll *ep,
1699                           struct epoll_event __user *events, int maxevents)
1700 {
1701         struct ep_send_events_data esed;
1702 
1703         esed.maxevents = maxevents;
1704         esed.events = events;
1705 
1706         ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
1707         return esed.res;
1708 }
1709 
1710 static inline struct timespec64 ep_set_mstimeout(long ms)
1711 {
1712         struct timespec64 now, ts = {
1713                 .tv_sec = ms / MSEC_PER_SEC,
1714                 .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
1715         };
1716 
1717         ktime_get_ts64(&now);
1718         return timespec64_add_safe(now, ts);
1719 }
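
/*
 * Example (editorial): for ms = 1500 the relative timeout built above is
 * { .tv_sec = 1, .tv_nsec = 500000000 }; adding it to the current
 * CLOCK_MONOTONIC time via timespec64_add_safe() (which clamps on
 * overflow) yields the absolute expiry used by ep_poll() below.
 */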
1720 
1721 /**
1722  * ep_poll - Retrieves ready events, and delivers them to the caller supplied
1723  *           event buffer.
1724  *
1725  * @ep: Pointer to the eventpoll context.
1726  * @events: Pointer to the userspace buffer where the ready events should be
1727  *          stored.
1728  * @maxevents: Size (in terms of number of events) of the caller event buffer.
1729  * @timeout: Maximum timeout for the ready events fetch operation, in
1730  *           milliseconds. If the @timeout is zero, the function will not block,
1731  *           while if the @timeout is less than zero, the function will block
1732  *           until at least one event has been retrieved (or an error
1733  *           occurred).
1734  *
1735  * Returns: The number of ready events which have been fetched, or an
1736  *          error code in case of error.
1737  */
1738 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1739                    int maxevents, long timeout)
1740 {
1741         int res = 0, eavail, timed_out = 0;
1742         unsigned long flags;
1743         u64 slack = 0;
1744         wait_queue_entry_t wait;
1745         ktime_t expires, *to = NULL;
1746 
1747         if (timeout > 0) {
1748                 struct timespec64 end_time = ep_set_mstimeout(timeout);
1749 
1750                 slack = select_estimate_accuracy(&end_time);
1751                 to = &expires;
1752                 *to = timespec64_to_ktime(end_time);
1753         } else if (timeout == 0) {
1754                 /*
1755                  * Avoid the unnecessary trip to the wait queue loop if the
1756                  * caller specified a non-blocking operation.
1757                  */
1758                 timed_out = 1;
1759                 spin_lock_irqsave(&ep->lock, flags);
1760                 goto check_events;
1761         }
1762 
1763 fetch_events:
1764 
1765         if (!ep_events_available(ep))
1766                 ep_busy_loop(ep, timed_out);
1767 
1768         spin_lock_irqsave(&ep->lock, flags);
1769 
1770         if (!ep_events_available(ep)) {
1771                 /*
1772                  * Busy poll timed out.  Drop NAPI ID for now, we can add
1773                  * it back in when we have moved a socket with a valid NAPI
1774                  * ID onto the ready list.
1775                  */
1776                 ep_reset_busy_poll_napi_id(ep);
1777 
1778                 /*
1779                  * We don't have any available event to return to the caller.
1780                  * We need to sleep here, and we will be woken up by
1781                  * ep_poll_callback() when events become available.
1782                  */
1783                 init_waitqueue_entry(&wait, current);
1784                 __add_wait_queue_exclusive(&ep->wq, &wait);
1785 
1786                 for (;;) {
1787                         /*
1788                  * We don't want to sleep if ep_poll_callback() sends us
1789                          * a wakeup in between. That's why we set the task state
1790                          * to TASK_INTERRUPTIBLE before doing the checks.
1791                          */
1792                         set_current_state(TASK_INTERRUPTIBLE);
1793                         /*
1794                          * Always short-circuit for fatal signals to allow
1795                          * threads to make a timely exit without the chance of
1796                          * finding more events available and fetching
1797                          * repeatedly.
1798                          */
1799                         if (fatal_signal_pending(current)) {
1800                                 res = -EINTR;
1801                                 break;
1802                         }
1803                         if (ep_events_available(ep) || timed_out)
1804                                 break;
1805                         if (signal_pending(current)) {
1806                                 res = -EINTR;
1807                                 break;
1808                         }
1809 
1810                         spin_unlock_irqrestore(&ep->lock, flags);
1811                         if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
1812                                 timed_out = 1;
1813 
1814                         spin_lock_irqsave(&ep->lock, flags);
1815                 }
1816 
1817                 __remove_wait_queue(&ep->wq, &wait);
1818                 __set_current_state(TASK_RUNNING);
1819         }
1820 check_events:
1821         /* Is it worth trying to dig for events? */
1822         eavail = ep_events_available(ep);
1823 
1824         spin_unlock_irqrestore(&ep->lock, flags);
1825 
1826         /*
1827          * Try to transfer events to user space. In case we get 0 events and
1828          * there's still timeout left over, we try again in search of
1829          * more luck.
1830          */
1831         if (!res && eavail &&
1832             !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
1833                 goto fetch_events;
1834 
1835         return res;
1836 }
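
/*
 * Example (editorial sketch, user space) of the three @timeout modes
 * described above:
 *
 *	n = epoll_wait(epfd, evs, 64, -1);	/* block indefinitely */
 *	n = epoll_wait(epfd, evs, 64, 0);	/* poll, never block */
 *	n = epoll_wait(epfd, evs, 64, 100);	/* wait at most ~100 ms */
 */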
1837 
1838 /**
1839  * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
1840  *                      API, to verify that adding an epoll file inside another
1841  *                      epoll structure does not violate the constraints, in
1842  *                      terms of closed loops or too deep chains (which can
1843  *                      result in excessive stack usage).
1844  *
1845  * @priv: Pointer to the epoll file currently being checked.
1846  * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
1847  *          data structure pointer.
1848  * @call_nests: Current depth of the @ep_call_nested() call stack.
1849  *
1850  * Returns: Zero if adding the epoll @file inside the current epoll
1851  *          structure @ep does not violate the constraints, or -1 otherwise.
1852  */
1853 static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1854 {
1855         int error = 0;
1856         struct file *file = priv;
1857         struct eventpoll *ep = file->private_data;
1858         struct eventpoll *ep_tovisit;
1859         struct rb_node *rbp;
1860         struct epitem *epi;
1861 
1862         mutex_lock_nested(&ep->mtx, call_nests + 1);
1863         ep->visited = 1;
1864         list_add(&ep->visited_list_link, &visited_list);
1865         for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1866                 epi = rb_entry(rbp, struct epitem, rbn);
1867                 if (unlikely(is_file_epoll(epi->ffd.file))) {
1868                         ep_tovisit = epi->ffd.file->private_data;
1869                         if (ep_tovisit->visited)
1870                                 continue;
1871                         error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1872                                         ep_loop_check_proc, epi->ffd.file,
1873                                         ep_tovisit, current);
1874                         if (error != 0)
1875                                 break;
1876                 } else {
1877                         /*
1878                          * If we've reached a file that is not associated with
1879                          * an ep, then we need to check if the newly added
1880                          * links are going to add too many wakeup paths. We do
1881                          * this by adding it to the tfile_check_list, if it's
1882                          * not already there, and calling reverse_path_check()
1883                          * during ep_insert().
1884                          */
1885                         if (list_empty(&epi->ffd.file->f_tfile_llink))
1886                                 list_add(&epi->ffd.file->f_tfile_llink,
1887                                          &tfile_check_list);
1888                 }
1889         }
1890         mutex_unlock(&ep->mtx);
1891 
1892         return error;
1893 }
1894 
1895 /**
1896  * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
1897  *                 inside another epoll file (represented by @ep) does not create
1898  *                 closed loops or too deep chains.
1899  *
1900  * @ep: Pointer to the epoll private data structure.
1901  * @file: Pointer to the epoll file to be checked.
1902  *
1903  * Returns: Zero if adding the epoll @file inside the current epoll
1904  *          structure @ep does not violate the constraints, or -1 otherwise.
1905  */
1906 static int ep_loop_check(struct eventpoll *ep, struct file *file)
1907 {
1908         int ret;
1909         struct eventpoll *ep_cur, *ep_next;
1910 
1911         ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1912                               ep_loop_check_proc, file, ep, current);
1913         /* clear visited list */
1914         list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
1915                                                         visited_list_link) {
1916                 ep_cur->visited = 0;
1917                 list_del(&ep_cur->visited_list_link);
1918         }
1919         return ret;
1920 }
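
/*
 * Example (editorial sketch, user space): the cycle that ep_loop_check()
 * rejects can be constructed with two epoll instances watching each other:
 *
 *	int a = epoll_create1(0), b = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(a, EPOLL_CTL_ADD, b, &ev);	/* ok: a watches b */
 *	epoll_ctl(b, EPOLL_CTL_ADD, a, &ev);	/* fails with ELOOP */
 */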
1921 
1922 static void clear_tfile_check_list(void)
1923 {
1924         struct file *file;
1925 
1926         /* first clear the tfile_check_list */
1927         while (!list_empty(&tfile_check_list)) {
1928                 file = list_first_entry(&tfile_check_list, struct file,
1929                                         f_tfile_llink);
1930                 list_del_init(&file->f_tfile_llink);
1931         }
1932         INIT_LIST_HEAD(&tfile_check_list);
1933 }
1934 
1935 /*
1936  * Open an eventpoll file descriptor.
1937  */
1938 static int do_epoll_create(int flags)
1939 {
1940         int error, fd;
1941         struct eventpoll *ep = NULL;
1942         struct file *file;
1943 
1944         /* Check the EPOLL_* constant for consistency.  */
1945         BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
1946 
1947         if (flags & ~EPOLL_CLOEXEC)
1948                 return -EINVAL;
1949         /*
1950          * Create the internal data structure ("struct eventpoll").
1951          */
1952         error = ep_alloc(&ep);
1953         if (error < 0)
1954                 return error;
1955         /*
1956          * Creates all the items needed to set up an eventpoll file. That is,
1957          * a file structure and a free file descriptor.
1958          */
1959         fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
1960         if (fd < 0) {
1961                 error = fd;
1962                 goto out_free_ep;
1963         }
1964         file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
1965                                  O_RDWR | (flags & O_CLOEXEC));
1966         if (IS_ERR(file)) {
1967                 error = PTR_ERR(file);
1968                 goto out_free_fd;
1969         }
1970         ep->file = file;
1971         fd_install(fd, file);
1972         return fd;
1973 
1974 out_free_fd:
1975         put_unused_fd(fd);
1976 out_free_ep:
1977         ep_free(ep);
1978         return error;
1979 }
1980 
1981 SYSCALL_DEFINE1(epoll_create1, int, flags)
1982 {
1983         return do_epoll_create(flags);
1984 }
1985 
1986 SYSCALL_DEFINE1(epoll_create, int, size)
1987 {
1988         if (size <= 0)
1989                 return -EINVAL;
1990 
1991         return do_epoll_create(0);
1992 }
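
/*
 * Example (editorial sketch, user space): both entry points above end up
 * in do_epoll_create(); the @size argument of the legacy epoll_create()
 * is only validated (must be > 0) and otherwise ignored:
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	if (epfd == -1)
 *		perror("epoll_create1");
 */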
1993 
1994 /*
1995  * The following function implements the controller interface for
1996  * the eventpoll file that enables the insertion/removal/change of
1997  * file descriptors inside the interest set.
1998  */
1999 SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2000                 struct epoll_event __user *, event)
2001 {
2002         int error;
2003         int full_check = 0;
2004         struct fd f, tf;
2005         struct eventpoll *ep;
2006         struct epitem *epi;
2007         struct epoll_event epds;
2008         struct eventpoll *tep = NULL;
2009 
2010         error = -EFAULT;
2011         if (ep_op_has_event(op) &&
2012             copy_from_user(&epds, event, sizeof(struct epoll_event)))
2013                 goto error_return;
2014 
2015         error = -EBADF;
2016         f = fdget(epfd);
2017         if (!f.file)
2018                 goto error_return;
2019 
2020         /* Get the "struct file *" for the target file */
2021         tf = fdget(fd);
2022         if (!tf.file)
2023                 goto error_fput;
2024 
2025         /* The target file descriptor must support poll */
2026         error = -EPERM;
2027         if (!file_can_poll(tf.file))
2028                 goto error_tgt_fput;
2029 
2030         /* Check if EPOLLWAKEUP is allowed */
2031         if (ep_op_has_event(op))
2032                 ep_take_care_of_epollwakeup(&epds);
2033 
2034         /*
2035          * We have to check that the file structure underneath the file descriptor
2036          * the user passed to us _is_ an eventpoll file. We also do not permit
2037          * adding an epoll file descriptor inside itself.
2038          */
2039         error = -EINVAL;
2040         if (f.file == tf.file || !is_file_epoll(f.file))
2041                 goto error_tgt_fput;
2042 
2043         /*
2044          * epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
2045          * so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
2046          * Also, we do not currently support nested exclusive wakeups.
2047          */
2048         if (ep_op_has_event(op) && (epds.events & EPOLLEXCLUSIVE)) {
2049                 if (op == EPOLL_CTL_MOD)
2050                         goto error_tgt_fput;
2051                 if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
2052                                 (epds.events & ~EPOLLEXCLUSIVE_OK_BITS)))
2053                         goto error_tgt_fput;
2054         }
2055 
2056         /*
2057          * At this point it is safe to assume that the "private_data" contains
2058          * our own data structure.
2059          */
2060         ep = f.file->private_data;
2061 
2062         /*
2063          * When we insert an epoll file descriptor inside another epoll file
2064          * descriptor, there is the chance of creating closed loops, which are
2065          * better handled here than in more critical paths. While we are
2066          * checking for loops we also determine the list of files reachable
2067          * and hang them on the tfile_check_list, so we can check that we
2068          * haven't created too many possible wakeup paths.
2069          *
2070          * We do not need to take the global 'epmutex' on EPOLL_CTL_ADD when
2071          * the epoll file descriptor is attaching directly to a wakeup source,
2072          * unless the epoll file descriptor is nested. The purpose of taking the
2073          * 'epmutex' on add is to prevent complex topologies such as loops and
2074          * deep wakeup paths from forming in parallel through multiple
2075          * EPOLL_CTL_ADD operations.
2076          */
2077         mutex_lock_nested(&ep->mtx, 0);
2078         if (op == EPOLL_CTL_ADD) {
2079                 if (!list_empty(&f.file->f_ep_links) ||
2080                                                 is_file_epoll(tf.file)) {
2081                         full_check = 1;
2082                         mutex_unlock(&ep->mtx);
2083                         mutex_lock(&epmutex);
2084                         if (is_file_epoll(tf.file)) {
2085                                 error = -ELOOP;
2086                                 if (ep_loop_check(ep, tf.file) != 0) {
2087                                         clear_tfile_check_list();
2088                                         goto error_tgt_fput;
2089                                 }
2090                         } else
2091                                 list_add(&tf.file->f_tfile_llink,
2092                                                         &tfile_check_list);
2093                         mutex_lock_nested(&ep->mtx, 0);
2094                         if (is_file_epoll(tf.file)) {
2095                                 tep = tf.file->private_data;
2096                                 mutex_lock_nested(&tep->mtx, 1);
2097                         }
2098                 }
2099         }
2100 
2101         /*
2102          * Try to look up the file inside our RB tree. Since we grabbed "mtx"
2103          * above, we can be sure to be able to use the item looked up by
2104          * ep_find() till we release the mutex.
2105          */
2106         epi = ep_find(ep, tf.file, fd);
2107 
2108         error = -EINVAL;
2109         switch (op) {
2110         case EPOLL_CTL_ADD:
2111                 if (!epi) {
2112                         epds.events |= EPOLLERR | EPOLLHUP;
2113                         error = ep_insert(ep, &epds, tf.file, fd, full_check);
2114                 } else
2115                         error = -EEXIST;
2116                 if (full_check)
2117                         clear_tfile_check_list();
2118                 break;
2119         case EPOLL_CTL_DEL:
2120                 if (epi)
2121                         error = ep_remove(ep, epi);
2122                 else
2123                         error = -ENOENT;
2124                 break;
2125         case EPOLL_CTL_MOD:
2126                 if (epi) {
2127                         if (!(epi->event.events & EPOLLEXCLUSIVE)) {
2128                                 epds.events |= EPOLLERR | EPOLLHUP;
2129                                 error = ep_modify(ep, epi, &epds);
2130                         }
2131                 } else
2132                         error = -ENOENT;
2133                 break;
2134         }
2135         if (tep != NULL)
2136                 mutex_unlock(&tep->mtx);
2137         mutex_unlock(&ep->mtx);
2138 
2139 error_tgt_fput:
2140         if (full_check)
2141                 mutex_unlock(&epmutex);
2142 
2143         fdput(tf);
2144 error_fput:
2145         fdput(f);
2146 error_return:
2147 
2148         return error;
2149 }
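
/*
 * Example (editorial sketch, user space): per the checks above,
 * EPOLLEXCLUSIVE is accepted only at ADD time and never on another epoll
 * file:
 *
 *	struct epoll_event ev = { .events = EPOLLIN | EPOLLEXCLUSIVE };
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);	/* ok */
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, listen_fd, &ev);	/* fails: EINVAL */
 */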
2150 
2151 /*
2152  * Implement the event wait interface for the eventpoll file. It is the kernel
2153  * part of the user space epoll_wait(2).
2154  */
2155 static int do_epoll_wait(int epfd, struct epoll_event __user *events,
2156                          int maxevents, int timeout)
2157 {
2158         int error;
2159         struct fd f;
2160         struct eventpoll *ep;
2161 
2162         /* The maximum number of events must be greater than zero */
2163         if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
2164                 return -EINVAL;
2165 
2166         /* Verify that the area passed by the user is writable */
2167         if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event)))
2168                 return -EFAULT;
2169 
2170         /* Get the "struct file *" for the eventpoll file */
2171         f = fdget(epfd);
2172         if (!f.file)
2173                 return -EBADF;
2174 
2175         /*
2176          * We have to check that the file structure underneath the fd
2177          * the user passed to us _is_ an eventpoll file.
2178          */
2179         error = -EINVAL;
2180         if (!is_file_epoll(f.file))
2181                 goto error_fput;
2182 
2183         /*
2184          * At this point it is safe to assume that the "private_data" contains
2185          * our own data structure.
2186          */
2187         ep = f.file->private_data;
2188 
2189         /* Time to fish for events ... */
2190         error = ep_poll(ep, events, maxevents, timeout);
2191 
2192 error_fput:
2193         fdput(f);
2194         return error;
2195 }
2196 
2197 SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
2198                 int, maxevents, int, timeout)
2199 {
2200         return do_epoll_wait(epfd, events, maxevents, timeout);
2201 }
2202 
2203 /*
2204  * Implement the event wait interface for the eventpoll file. It is the kernel
2205  * part of the user space epoll_pwait(2).
2206  */
2207 SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
2208                 int, maxevents, int, timeout, const sigset_t __user *, sigmask,
2209                 size_t, sigsetsize)
2210 {
2211         int error;
2212         sigset_t ksigmask, sigsaved;
2213 
2214         /*
2215          * If the caller wants a certain signal mask to be set during the wait,
2216          * we apply it here.
2217          */
2218         if (sigmask) {
2219                 if (sigsetsize != sizeof(sigset_t))
2220                         return -EINVAL;
2221                 if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
2222                         return -EFAULT;
2223                 sigsaved = current->blocked;
2224                 set_current_blocked(&ksigmask);
2225         }
2226 
2227         error = do_epoll_wait(epfd, events, maxevents, timeout);
2228 
2229         /*
2230          * If we changed the signal mask, we need to restore the original one.
2231          * In case we've got a signal while waiting, we do not restore the
2232          * signal mask yet, and we allow do_signal() to deliver the signal on
2233          * the way back to userspace, before the signal mask is restored.
2234          */
2235         if (sigmask) {
2236                 if (error == -EINTR) {
2237                         memcpy(&current->saved_sigmask, &sigsaved,
2238                                sizeof(sigsaved));
2239                         set_restore_sigmask();
2240                 } else
2241                         set_current_blocked(&sigsaved);
2242         }
2243 
2244         return error;
2245 }
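
/*
 * Example (editorial sketch, user space): epoll_pwait() swaps the signal
 * mask atomically around the wait, avoiding the race inherent in a
 * separate sigprocmask() + epoll_wait() pair:
 *
 *	sigset_t mask;
 *
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGINT);	/* block SIGINT while waiting */
 *	n = epoll_pwait(epfd, evs, 64, -1, &mask);
 *
 * (The glibc wrapper supplies the sigsetsize argument seen in the kernel
 * prototype above.)
 */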
2246 
2247 #ifdef CONFIG_COMPAT
2248 COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
2249                         struct epoll_event __user *, events,
2250                         int, maxevents, int, timeout,
2251                         const compat_sigset_t __user *, sigmask,
2252                         compat_size_t, sigsetsize)
2253 {
2254         long err;
2255         sigset_t ksigmask, sigsaved;
2256 
2257         /*
2258          * If the caller wants a certain signal mask to be set during the wait,
2259          * we apply it here.
2260          */
2261         if (sigmask) {
2262                 if (sigsetsize != sizeof(compat_sigset_t))
2263                         return -EINVAL;
2264                 if (get_compat_sigset(&ksigmask, sigmask))
2265                         return -EFAULT;
2266                 sigsaved = current->blocked;
2267                 set_current_blocked(&ksigmask);
2268         }
2269 
2270         err = do_epoll_wait(epfd, events, maxevents, timeout);
2271 
2272         /*
2273          * If we changed the signal mask, we need to restore the original one.
2274          * In case we've got a signal while waiting, we do not restore the
2275          * signal mask yet, and we allow do_signal() to deliver the signal on
2276          * the way back to userspace, before the signal mask is restored.
2277          */
2278         if (sigmask) {
2279                 if (err == -EINTR) {
2280                         memcpy(&current->saved_sigmask, &sigsaved,
2281                                sizeof(sigsaved));
2282                         set_restore_sigmask();
2283                 } else
2284                         set_current_blocked(&sigsaved);
2285         }
2286 
2287         return err;
2288 }
2289 #endif
2290 
2291 static int __init eventpoll_init(void)
2292 {
2293         struct sysinfo si;
2294 
2295         si_meminfo(&si);
2296         /*
2297          * Allows the top 4% of lowmem to be allocated for epoll watches (per user).
2298          */
2299         max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
2300                 EP_ITEM_COST;
2301         BUG_ON(max_user_watches < 0);
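
        /*
         * Example (editorial): "/ 25" is the "top 4%" above (1/25 = 4%).
         * With, say, 4 GiB of lowmem and an EP_ITEM_COST of ~200 bytes
         * (assumed here: sizeof(struct epitem) + sizeof(struct
         * eppoll_entry), both arch-dependent), this allows roughly
         * 4 GiB / 25 / 200 ≈ 860000 watches per user.
         */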
2302 
2303         /*
2304          * Initialize the structure used to perform epoll file descriptor
2305          * inclusion loop checks.
2306          */
2307         ep_nested_calls_init(&poll_loop_ncalls);
2308 
2309 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2310         /* Initialize the structure used to perform safe poll wait head wake ups */
2311         ep_nested_calls_init(&poll_safewake_ncalls);
2312 #endif
2313 
2314         /*
2315          * We can have many thousands of epitems, so prevent this from
2316          * using an extra cache line on 64-bit (and smaller) CPUs
2317          */
2318         BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
2319 
2320         /* Allocates slab cache used to allocate "struct epitem" items */
2321         epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
2322                         0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
2323 
2324         /* Allocates slab cache used to allocate "struct eppoll_entry" */
2325         pwq_cache = kmem_cache_create("eventpoll_pwq",
2326                 sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
2327 
2328         return 0;
2329 }
2330 fs_initcall(eventpoll_init);
2331 
