   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  fs/eventpoll.c (Efficient event retrieval implementation)
   4 *  Copyright (C) 2001,...,2009  Davide Libenzi
   5 *
   6 *  Davide Libenzi <davidel@xmailserver.org>
   7 */
   8
   9#include <linux/init.h>
  10#include <linux/kernel.h>
  11#include <linux/sched/signal.h>
  12#include <linux/fs.h>
  13#include <linux/file.h>
  14#include <linux/signal.h>
  15#include <linux/errno.h>
  16#include <linux/mm.h>
  17#include <linux/slab.h>
  18#include <linux/poll.h>
  19#include <linux/string.h>
  20#include <linux/list.h>
  21#include <linux/hash.h>
  22#include <linux/spinlock.h>
  23#include <linux/syscalls.h>
  24#include <linux/rbtree.h>
  25#include <linux/wait.h>
  26#include <linux/eventpoll.h>
  27#include <linux/mount.h>
  28#include <linux/bitops.h>
  29#include <linux/mutex.h>
  30#include <linux/anon_inodes.h>
  31#include <linux/device.h>
  32#include <linux/uaccess.h>
  33#include <asm/io.h>
  34#include <asm/mman.h>
  35#include <linux/atomic.h>
  36#include <linux/proc_fs.h>
  37#include <linux/seq_file.h>
  38#include <linux/compat.h>
  39#include <linux/rculist.h>
  40#include <net/busy_poll.h>
  41
  42/*
  43 * LOCKING:
   44 * There are three levels of locking required by epoll:
  45 *
  46 * 1) epnested_mutex (mutex)
  47 * 2) ep->mtx (mutex)
  48 * 3) ep->lock (rwlock)
  49 *
  50 * The acquire order is the one listed above, from 1 to 3.
  51 * We need a rwlock (ep->lock) because we manipulate objects
  52 * from inside the poll callback, that might be triggered from
  53 * a wake_up() that in turn might be called from IRQ context.
  54 * So we can't sleep inside the poll callback and hence we need
  55 * a spinlock. During the event transfer loop (from kernel to
   56 * user space) we could end up sleeping due to a copy_to_user(), so
  57 * we need a lock that will allow us to sleep. This lock is a
  58 * mutex (ep->mtx). It is acquired during the event transfer loop,
  59 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
  60 * The epnested_mutex is acquired when inserting an epoll fd onto another
  61 * epoll fd. We do this so that we walk the epoll tree and ensure that this
  62 * insertion does not create a cycle of epoll file descriptors, which
  63 * could lead to deadlock. We need a global mutex to prevent two
  64 * simultaneous inserts (A into B and B into A) from racing and
  65 * constructing a cycle without either insert observing that it is
   66 * about to do so.
  67 * It is necessary to acquire multiple "ep->mtx"es at once in the
  68 * case when one epoll fd is added to another. In this case, we
  69 * always acquire the locks in the order of nesting (i.e. after
  70 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
  71 * before e2->mtx). Since we disallow cycles of epoll file
  72 * descriptors, this ensures that the mutexes are well-ordered. In
  73 * order to communicate this nesting to lockdep, when walking a tree
  74 * of epoll file descriptors, we use the current recursion depth as
  75 * the lockdep subkey.
  76 * It is possible to drop the "ep->mtx" and to use the global
  77 * mutex "epnested_mutex" (together with "ep->lock") to have it working,
  78 * but having "ep->mtx" will make the interface more scalable.
  79 * Events that require holding "epnested_mutex" are very rare, while for
  80 * normal operations the epoll private "ep->mtx" will guarantee
  81 * a better scalability.
  82 */
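
/*
 * Illustration (a minimal userspace sketch, not part of this file): the
 * nested-epoll case the ordering rules above exist for. Adding one epoll
 * fd to another makes the insert path take e1->mtx and then e2->mtx via
 * mutex_lock_nested(), with epnested_mutex held for the cycle check:
 *
 *        #include <sys/epoll.h>
 *
 *        int e1 = epoll_create1(0);
 *        int e2 = epoll_create1(0);
 *        struct epoll_event ev = { .events = EPOLLIN };
 *
 *        epoll_ctl(e1, EPOLL_CTL_ADD, e2, &ev);  // e1 now watches e2
 *        // the reverse insertion would close a cycle; it fails with ELOOP:
 *        // epoll_ctl(e2, EPOLL_CTL_ADD, e1, &ev);
 */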
  83
  84/* Epoll private bits inside the event mask */
  85#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
  86
  87#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)
  88
  89#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
  90                                EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
  91
   92/* Maximum nesting depth allowed inside epoll sets */
  93#define EP_MAX_NESTS 4
  94
  95#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
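
/*
 * Worked example: struct epoll_event is 12 bytes on x86 (packed 4-byte
 * events + 8-byte data), so there EP_MAX_EVENTS works out to INT_MAX / 12,
 * roughly 178 million; architectures without the packed attribute see a
 * 16-byte struct and INT_MAX / 16. The epoll_wait() entry path rejects
 * maxevents values outside [1, EP_MAX_EVENTS] with -EINVAL.
 */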
  96
  97#define EP_UNACTIVE_PTR ((void *) -1L)
  98
  99#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
 100
 101struct epoll_filefd {
 102        struct file *file;
 103        int fd;
 104} __packed;
 105
 106/* Wait structure used by the poll hooks */
 107struct eppoll_entry {
 108        /* List header used to link this structure to the "struct epitem" */
 109        struct eppoll_entry *next;
 110
 111        /* The "base" pointer is set to the container "struct epitem" */
 112        struct epitem *base;
 113
 114        /*
 115         * Wait queue item that will be linked to the target file wait
 116         * queue head.
 117         */
 118        wait_queue_entry_t wait;
 119
  120        /* The wait queue head that the "wait" wait queue item is linked to */
 121        wait_queue_head_t *whead;
 122};
 123
 124/*
 125 * Each file descriptor added to the eventpoll interface will
 126 * have an entry of this type linked to the "rbr" RB tree.
  127 * Avoid increasing the size of this struct; there can be many thousands
 128 * of these on a server and we do not want this to take another cache line.
 129 */
 130struct epitem {
 131        union {
 132                /* RB tree node links this structure to the eventpoll RB tree */
 133                struct rb_node rbn;
 134                /* Used to free the struct epitem */
 135                struct rcu_head rcu;
 136        };
 137
 138        /* List header used to link this structure to the eventpoll ready list */
 139        struct list_head rdllink;
 140
 141        /*
  142         * Works together with "struct eventpoll"->ovflist in keeping the
  143         * singly linked chain of items.
 144         */
 145        struct epitem *next;
 146
 147        /* The file descriptor information this item refers to */
 148        struct epoll_filefd ffd;
 149
 150        /*
  151         * Protected by file->f_lock; true for a to-be-released epitem that has
  152         * already been removed from the "struct file" items list. Together with
  153         * eventpoll->refcount, it orchestrates "struct eventpoll" disposal.
 154         */
 155        bool dying;
 156
 157        /* List containing poll wait queues */
 158        struct eppoll_entry *pwqlist;
 159
 160        /* The "container" of this item */
 161        struct eventpoll *ep;
 162
 163        /* List header used to link this item to the "struct file" items list */
 164        struct hlist_node fllink;
 165
 166        /* wakeup_source used when EPOLLWAKEUP is set */
 167        struct wakeup_source __rcu *ws;
 168
  169        /* The structure that describes the interested events and the source fd */
 170        struct epoll_event event;
 171};
 172
 173/*
 174 * This structure is stored inside the "private_data" member of the file
 175 * structure and represents the main data structure for the eventpoll
 176 * interface.
 177 */
 178struct eventpoll {
 179        /*
 180         * This mutex is used to ensure that files are not removed
 181         * while epoll is using them. This is held during the event
 182         * collection loop, the file cleanup path, the epoll file exit
 183         * code and the ctl operations.
 184         */
 185        struct mutex mtx;
 186
 187        /* Wait queue used by sys_epoll_wait() */
 188        wait_queue_head_t wq;
 189
 190        /* Wait queue used by file->poll() */
 191        wait_queue_head_t poll_wait;
 192
 193        /* List of ready file descriptors */
 194        struct list_head rdllist;
 195
 196        /* Lock which protects rdllist and ovflist */
 197        rwlock_t lock;
 198
 199        /* RB tree root used to store monitored fd structs */
 200        struct rb_root_cached rbr;
 201
 202        /*
  203         * This is a singly linked list that chains all the "struct epitem" that
  204         * had events happen while we were transferring ready events to
  205         * userspace without holding ->lock.
 206         */
 207        struct epitem *ovflist;
 208
  209        /* wakeup_source used while the ready list is being scanned */
 210        struct wakeup_source *ws;
 211
 212        /* The user that created the eventpoll descriptor */
 213        struct user_struct *user;
 214
 215        struct file *file;
 216
 217        /* used to optimize loop detection check */
 218        u64 gen;
 219        struct hlist_head refs;
 220
 221        /*
 222         * usage count, used together with epitem->dying to
 223         * orchestrate the disposal of this struct
 224         */
 225        refcount_t refcount;
 226
 227#ifdef CONFIG_NET_RX_BUSY_POLL
 228        /* used to track busy poll napi_id */
 229        unsigned int napi_id;
 230#endif
 231
 232#ifdef CONFIG_DEBUG_LOCK_ALLOC
 233        /* tracks wakeup nests for lockdep validation */
 234        u8 nests;
 235#endif
 236};
 237
 238/* Wrapper struct used by poll queueing */
 239struct ep_pqueue {
 240        poll_table pt;
 241        struct epitem *epi;
 242};
 243
 244/*
 245 * Configuration options available inside /proc/sys/fs/epoll/
 246 */
 247/* Maximum number of epoll watched descriptors, per user */
 248static long max_user_watches __read_mostly;
 249
  250/* Used for cycle detection */
 251static DEFINE_MUTEX(epnested_mutex);
 252
 253static u64 loop_check_gen = 0;
 254
 255/* Used to check for epoll file descriptor inclusion loops */
 256static struct eventpoll *inserting_into;
 257
 258/* Slab cache used to allocate "struct epitem" */
 259static struct kmem_cache *epi_cache __ro_after_init;
 260
 261/* Slab cache used to allocate "struct eppoll_entry" */
 262static struct kmem_cache *pwq_cache __ro_after_init;
 263
 264/*
 265 * List of files with newly added links, where we may need to limit the number
 266 * of emanating paths. Protected by the epnested_mutex.
 267 */
 268struct epitems_head {
 269        struct hlist_head epitems;
 270        struct epitems_head *next;
 271};
 272static struct epitems_head *tfile_check_list = EP_UNACTIVE_PTR;
 273
 274static struct kmem_cache *ephead_cache __ro_after_init;
 275
 276static inline void free_ephead(struct epitems_head *head)
 277{
 278        if (head)
 279                kmem_cache_free(ephead_cache, head);
 280}
 281
 282static void list_file(struct file *file)
 283{
 284        struct epitems_head *head;
 285
 286        head = container_of(file->f_ep, struct epitems_head, epitems);
 287        if (!head->next) {
 288                head->next = tfile_check_list;
 289                tfile_check_list = head;
 290        }
 291}
 292
 293static void unlist_file(struct epitems_head *head)
 294{
 295        struct epitems_head *to_free = head;
 296        struct hlist_node *p = rcu_dereference(hlist_first_rcu(&head->epitems));
 297        if (p) {
  298                struct epitem *epi = container_of(p, struct epitem, fllink);
 299                spin_lock(&epi->ffd.file->f_lock);
 300                if (!hlist_empty(&head->epitems))
 301                        to_free = NULL;
 302                head->next = NULL;
 303                spin_unlock(&epi->ffd.file->f_lock);
 304        }
 305        free_ephead(to_free);
 306}
 307
 308#ifdef CONFIG_SYSCTL
 309
 310#include <linux/sysctl.h>
 311
 312static long long_zero;
 313static long long_max = LONG_MAX;
 314
 315static struct ctl_table epoll_table[] = {
 316        {
 317                .procname       = "max_user_watches",
 318                .data           = &max_user_watches,
 319                .maxlen         = sizeof(max_user_watches),
 320                .mode           = 0644,
 321                .proc_handler   = proc_doulongvec_minmax,
 322                .extra1         = &long_zero,
 323                .extra2         = &long_max,
 324        },
 325        { }
 326};
 327
 328static void __init epoll_sysctls_init(void)
 329{
 330        register_sysctl("fs/epoll", epoll_table);
 331}
 332#else
 333#define epoll_sysctls_init() do { } while (0)
 334#endif /* CONFIG_SYSCTL */
 335
 336static const struct file_operations eventpoll_fops;
 337
 338static inline int is_file_epoll(struct file *f)
 339{
 340        return f->f_op == &eventpoll_fops;
 341}
 342
 343/* Setup the structure that is used as key for the RB tree */
 344static inline void ep_set_ffd(struct epoll_filefd *ffd,
 345                              struct file *file, int fd)
 346{
 347        ffd->file = file;
 348        ffd->fd = fd;
 349}
 350
 351/* Compare RB tree keys */
 352static inline int ep_cmp_ffd(struct epoll_filefd *p1,
 353                             struct epoll_filefd *p2)
 354{
 355        return (p1->file > p2->file ? +1:
 356                (p1->file < p2->file ? -1 : p1->fd - p2->fd));
 357}
 358
 359/* Tells us if the item is currently linked */
 360static inline int ep_is_linked(struct epitem *epi)
 361{
 362        return !list_empty(&epi->rdllink);
 363}
 364
 365static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
 366{
 367        return container_of(p, struct eppoll_entry, wait);
 368}
 369
 370/* Get the "struct epitem" from a wait queue pointer */
 371static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
 372{
 373        return container_of(p, struct eppoll_entry, wait)->base;
 374}
 375
 376/**
 377 * ep_events_available - Checks if ready events might be available.
 378 *
 379 * @ep: Pointer to the eventpoll context.
 380 *
  381 * Return: a value different from zero if ready events are available,
  382 *         or zero otherwise.
 383 */
 384static inline int ep_events_available(struct eventpoll *ep)
 385{
 386        return !list_empty_careful(&ep->rdllist) ||
 387                READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR;
 388}
 389
 390#ifdef CONFIG_NET_RX_BUSY_POLL
 391static bool ep_busy_loop_end(void *p, unsigned long start_time)
 392{
 393        struct eventpoll *ep = p;
 394
 395        return ep_events_available(ep) || busy_loop_timeout(start_time);
 396}
 397
 398/*
  399 * Busy poll if busy polling is globally enabled, a supporting socket was
  400 * found, and no events are available. The busy loop returns on
  401 * need_resched() or when ep_events_available() becomes true.
  402 * We must do our busy polling with irqs enabled.
 403 */
 404static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
 405{
 406        unsigned int napi_id = READ_ONCE(ep->napi_id);
 407
 408        if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on()) {
 409                napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep, false,
 410                               BUSY_POLL_BUDGET);
 411                if (ep_events_available(ep))
 412                        return true;
 413                /*
 414                 * Busy poll timed out.  Drop NAPI ID for now, we can add
 415                 * it back in when we have moved a socket with a valid NAPI
 416                 * ID onto the ready list.
 417                 */
 418                ep->napi_id = 0;
 419                return false;
 420        }
 421        return false;
 422}
 423
 424/*
 425 * Set epoll busy poll NAPI ID from sk.
 426 */
 427static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
 428{
 429        struct eventpoll *ep;
 430        unsigned int napi_id;
 431        struct socket *sock;
 432        struct sock *sk;
 433
 434        if (!net_busy_loop_on())
 435                return;
 436
 437        sock = sock_from_file(epi->ffd.file);
 438        if (!sock)
 439                return;
 440
 441        sk = sock->sk;
 442        if (!sk)
 443                return;
 444
 445        napi_id = READ_ONCE(sk->sk_napi_id);
 446        ep = epi->ep;
 447
  448        /*
  449         * Reject non-NAPI IDs (below MIN_NAPI_ID), and do nothing if we
  450         * already have this ID recorded.
  451         */
 452        if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
 453                return;
 454
 455        /* record NAPI ID for use in next busy poll */
 456        ep->napi_id = napi_id;
 457}
 458
 459#else
 460
 461static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
 462{
 463        return false;
 464}
 465
 466static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
 467{
 468}
 469
 470#endif /* CONFIG_NET_RX_BUSY_POLL */
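
/*
 * Illustration (an administration sketch, not part of this file): busy
 * polling only engages when the global knob is set and the watched socket
 * has a valid NAPI ID, e.g.:
 *
 *        # echo 50 > /proc/sys/net/core/busy_poll   (busy-wait budget, usecs)
 *
 * With that set, ep_poll() may spin in napi_busy_loop() for up to the
 * configured time before sleeping, trading CPU for lower wakeup latency.
 */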
 471
 472/*
  473 * As described in commit 0ccf831cb ("lockdep: annotate epoll"),
  474 * the use of wait queues by epoll is done in a very controlled
 475 * manner. Wake ups can nest inside each other, but are never done
 476 * with the same locking. For example:
 477 *
 478 *   dfd = socket(...);
 479 *   efd1 = epoll_create();
 480 *   efd2 = epoll_create();
 481 *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 482 *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 483 *
 484 * When a packet arrives to the device underneath "dfd", the net code will
 485 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 486 * callback wakeup entry on that queue, and the wake_up() performed by the
 487 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
 488 * (efd1) notices that it may have some event ready, so it needs to wake up
 489 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
  490 * that ends up in another wake_up(), after having checked the
  491 * recursion constraints: that is, no more than EP_MAX_NESTS levels,
  492 * to avoid stack blasting.
 493 *
 494 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
 495 * this special case of epoll.
 496 */
 497#ifdef CONFIG_DEBUG_LOCK_ALLOC
 498
 499static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
 500                             unsigned pollflags)
 501{
 502        struct eventpoll *ep_src;
 503        unsigned long flags;
 504        u8 nests = 0;
 505
 506        /*
 507         * To set the subclass or nesting level for spin_lock_irqsave_nested()
 508         * it might be natural to create a per-cpu nest count. However, since
 509         * we can recurse on ep->poll_wait.lock, and a non-raw spinlock can
  510         * schedule() in the -rt kernel, the per-cpu variables are no longer
  511         * protected. Thus, we are introducing a per-eventpoll nest field.
  512         * If we are not being called from ep_poll_callback(), epi is NULL and
  513         * we are at the first level of nesting, 0. Otherwise, we are being
  514         * called from ep_poll_callback() and if a previous wakeup source is
  515         * not an epoll file itself, we are at depth 1 since the wakeup source
  516         * is depth 0. If the wakeup source is a previous epoll file in the
  517         * wakeup chain then we use its nests value and record ours as
  518         * nests + 1. The previous epoll file's nests value is stable since it is
  519         * already holding its own poll_wait.lock.
 520         */
 521        if (epi) {
 522                if ((is_file_epoll(epi->ffd.file))) {
 523                        ep_src = epi->ffd.file->private_data;
 524                        nests = ep_src->nests;
 525                } else {
 526                        nests = 1;
 527                }
 528        }
 529        spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
 530        ep->nests = nests + 1;
 531        wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
 532        ep->nests = 0;
 533        spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
 534}
 535
 536#else
 537
 538static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
 539                             __poll_t pollflags)
 540{
 541        wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
 542}
 543
 544#endif
 545
 546static void ep_remove_wait_queue(struct eppoll_entry *pwq)
 547{
 548        wait_queue_head_t *whead;
 549
 550        rcu_read_lock();
 551        /*
 552         * If it is cleared by POLLFREE, it should be rcu-safe.
 553         * If we read NULL we need a barrier paired with
 554         * smp_store_release() in ep_poll_callback(), otherwise
 555         * we rely on whead->lock.
 556         */
 557        whead = smp_load_acquire(&pwq->whead);
 558        if (whead)
 559                remove_wait_queue(whead, &pwq->wait);
 560        rcu_read_unlock();
 561}
 562
 563/*
 564 * This function unregisters poll callbacks from the associated file
 565 * descriptor.  Must be called with "mtx" held.
 566 */
 567static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
 568{
 569        struct eppoll_entry **p = &epi->pwqlist;
 570        struct eppoll_entry *pwq;
 571
 572        while ((pwq = *p) != NULL) {
 573                *p = pwq->next;
 574                ep_remove_wait_queue(pwq);
 575                kmem_cache_free(pwq_cache, pwq);
 576        }
 577}
 578
 579/* call only when ep->mtx is held */
 580static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
 581{
 582        return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
 583}
 584
 585/* call only when ep->mtx is held */
 586static inline void ep_pm_stay_awake(struct epitem *epi)
 587{
 588        struct wakeup_source *ws = ep_wakeup_source(epi);
 589
 590        if (ws)
 591                __pm_stay_awake(ws);
 592}
 593
 594static inline bool ep_has_wakeup_source(struct epitem *epi)
 595{
 596        return rcu_access_pointer(epi->ws) ? true : false;
 597}
 598
 599/* call when ep->mtx cannot be held (ep_poll_callback) */
 600static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
 601{
 602        struct wakeup_source *ws;
 603
 604        rcu_read_lock();
 605        ws = rcu_dereference(epi->ws);
 606        if (ws)
 607                __pm_stay_awake(ws);
 608        rcu_read_unlock();
 609}
 610
 611
 612/*
  613 * ep->mtx needs to be held because we could be hit by
 614 * eventpoll_release_file() and epoll_ctl().
 615 */
 616static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
 617{
 618        /*
 619         * Steal the ready list, and re-init the original one to the
 620         * empty list. Also, set ep->ovflist to NULL so that events
  621         * happening while looping without locks are not lost. We cannot
  622         * have the poll callback queue directly on ep->rdllist,
 623         * because we want the "sproc" callback to be able to do it
 624         * in a lockless way.
 625         */
 626        lockdep_assert_irqs_enabled();
 627        write_lock_irq(&ep->lock);
 628        list_splice_init(&ep->rdllist, txlist);
 629        WRITE_ONCE(ep->ovflist, NULL);
 630        write_unlock_irq(&ep->lock);
 631}
 632
 633static void ep_done_scan(struct eventpoll *ep,
 634                         struct list_head *txlist)
 635{
 636        struct epitem *epi, *nepi;
 637
 638        write_lock_irq(&ep->lock);
 639        /*
 640         * During the time we spent inside the "sproc" callback, some
 641         * other events might have been queued by the poll callback.
 642         * We re-insert them inside the main ready-list here.
 643         */
 644        for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
 645             nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
 646                /*
 647                 * We need to check if the item is already in the list.
 648                 * During the "sproc" callback execution time, items are
 649                 * queued into ->ovflist but the "txlist" might already
 650                 * contain them, and the list_splice() below takes care of them.
 651                 */
 652                if (!ep_is_linked(epi)) {
 653                        /*
 654                         * ->ovflist is LIFO, so we have to reverse it in order
  655                         * to keep FIFO ordering.
 656                         */
 657                        list_add(&epi->rdllink, &ep->rdllist);
 658                        ep_pm_stay_awake(epi);
 659                }
 660        }
 661        /*
 662         * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
 663         * releasing the lock, events will be queued in the normal way inside
 664         * ep->rdllist.
 665         */
 666        WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR);
 667
 668        /*
 669         * Quickly re-inject items left on "txlist".
 670         */
 671        list_splice(txlist, &ep->rdllist);
 672        __pm_relax(ep->ws);
 673
 674        if (!list_empty(&ep->rdllist)) {
 675                if (waitqueue_active(&ep->wq))
 676                        wake_up(&ep->wq);
 677        }
 678
 679        write_unlock_irq(&ep->lock);
 680}
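
/*
 * Worked example of the reversal above: if events for A, B and C arrive in
 * that order while ->ovflist is active, the chain is built by head
 * insertion and reads C->B->A. Walking it with list_add() (again a head
 * insertion) yields the ready list A, B, C, so arrival order is preserved
 * for userspace.
 */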
 681
 682static void epi_rcu_free(struct rcu_head *head)
 683{
 684        struct epitem *epi = container_of(head, struct epitem, rcu);
 685        kmem_cache_free(epi_cache, epi);
 686}
 687
 688static void ep_get(struct eventpoll *ep)
 689{
 690        refcount_inc(&ep->refcount);
 691}
 692
 693/*
  694 * Returns true if the eventpoll can be disposed
 695 */
 696static bool ep_refcount_dec_and_test(struct eventpoll *ep)
 697{
 698        if (!refcount_dec_and_test(&ep->refcount))
 699                return false;
 700
 701        WARN_ON_ONCE(!RB_EMPTY_ROOT(&ep->rbr.rb_root));
 702        return true;
 703}
 704
 705static void ep_free(struct eventpoll *ep)
 706{
 707        mutex_destroy(&ep->mtx);
 708        free_uid(ep->user);
 709        wakeup_source_unregister(ep->ws);
 710        kfree(ep);
 711}
 712
 713/*
 714 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 715 * all the associated resources. Must be called with "mtx" held.
 716 * If the dying flag is set, do the removal only if force is true.
 717 * This prevents ep_clear_and_put() from dropping all the ep references
 718 * while running concurrently with eventpoll_release_file().
 719 * Returns true if the eventpoll can be disposed.
 720 */
 721static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
 722{
 723        struct file *file = epi->ffd.file;
 724        struct epitems_head *to_free;
 725        struct hlist_head *head;
 726
 727        lockdep_assert_irqs_enabled();
 728
 729        /*
 730         * Removes poll wait queue hooks.
 731         */
 732        ep_unregister_pollwait(ep, epi);
 733
 734        /* Remove the current item from the list of epoll hooks */
 735        spin_lock(&file->f_lock);
 736        if (epi->dying && !force) {
 737                spin_unlock(&file->f_lock);
 738                return false;
 739        }
 740
 741        to_free = NULL;
 742        head = file->f_ep;
 743        if (head->first == &epi->fllink && !epi->fllink.next) {
 744                file->f_ep = NULL;
 745                if (!is_file_epoll(file)) {
 746                        struct epitems_head *v;
 747                        v = container_of(head, struct epitems_head, epitems);
 748                        if (!smp_load_acquire(&v->next))
 749                                to_free = v;
 750                }
 751        }
 752        hlist_del_rcu(&epi->fllink);
 753        spin_unlock(&file->f_lock);
 754        free_ephead(to_free);
 755
 756        rb_erase_cached(&epi->rbn, &ep->rbr);
 757
 758        write_lock_irq(&ep->lock);
 759        if (ep_is_linked(epi))
 760                list_del_init(&epi->rdllink);
 761        write_unlock_irq(&ep->lock);
 762
 763        wakeup_source_unregister(ep_wakeup_source(epi));
 764        /*
 765         * At this point it is safe to free the eventpoll item. Use the union
 766         * field epi->rcu, since we are trying to minimize the size of
 767         * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
 768         * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
 769         * use of the rbn field.
 770         */
 771        call_rcu(&epi->rcu, epi_rcu_free);
 772
 773        percpu_counter_dec(&ep->user->epoll_watches);
 774        return ep_refcount_dec_and_test(ep);
 775}
 776
 777/*
  778 * ep_remove variant for callers owning an additional reference to the ep
 779 */
 780static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
 781{
 782        WARN_ON_ONCE(__ep_remove(ep, epi, false));
 783}
 784
 785static void ep_clear_and_put(struct eventpoll *ep)
 786{
 787        struct rb_node *rbp, *next;
 788        struct epitem *epi;
 789        bool dispose;
 790
  791        /* We need to release all tasks waiting on this file */
 792        if (waitqueue_active(&ep->poll_wait))
 793                ep_poll_safewake(ep, NULL, 0);
 794
 795        mutex_lock(&ep->mtx);
 796
 797        /*
  798         * Walks through the whole tree, unregistering poll callbacks.
 799         */
 800        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
 801                epi = rb_entry(rbp, struct epitem, rbn);
 802
 803                ep_unregister_pollwait(ep, epi);
 804                cond_resched();
 805        }
 806
 807        /*
  808         * Walks through the whole tree and tries to free each "struct epitem".
 809         * Note that ep_remove_safe() will not remove the epitem in case of a
 810         * racing eventpoll_release_file(); the latter will do the removal.
 811         * At this point we are sure no poll callbacks will be lingering around.
 812         * Since we still own a reference to the eventpoll struct, the loop can't
 813         * dispose it.
 814         */
 815        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = next) {
 816                next = rb_next(rbp);
 817                epi = rb_entry(rbp, struct epitem, rbn);
 818                ep_remove_safe(ep, epi);
 819                cond_resched();
 820        }
 821
 822        dispose = ep_refcount_dec_and_test(ep);
 823        mutex_unlock(&ep->mtx);
 824
 825        if (dispose)
 826                ep_free(ep);
 827}
 828
 829static int ep_eventpoll_release(struct inode *inode, struct file *file)
 830{
 831        struct eventpoll *ep = file->private_data;
 832
 833        if (ep)
 834                ep_clear_and_put(ep);
 835
 836        return 0;
 837}
 838
 839static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, int depth);
 840
 841static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int depth)
 842{
 843        struct eventpoll *ep = file->private_data;
 844        LIST_HEAD(txlist);
 845        struct epitem *epi, *tmp;
 846        poll_table pt;
 847        __poll_t res = 0;
 848
 849        init_poll_funcptr(&pt, NULL);
 850
 851        /* Insert inside our poll wait queue */
 852        poll_wait(file, &ep->poll_wait, wait);
 853
 854        /*
 855         * Proceed to find out if wanted events are really available inside
 856         * the ready list.
 857         */
 858        mutex_lock_nested(&ep->mtx, depth);
 859        ep_start_scan(ep, &txlist);
 860        list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
 861                if (ep_item_poll(epi, &pt, depth + 1)) {
 862                        res = EPOLLIN | EPOLLRDNORM;
 863                        break;
 864                } else {
 865                        /*
 866                         * Item has been dropped into the ready list by the poll
  867                         * callback, but it's not actually ready, as far as the
  868                         * caller-requested events go. We can remove it here.
 869                         */
 870                        __pm_relax(ep_wakeup_source(epi));
 871                        list_del_init(&epi->rdllink);
 872                }
 873        }
 874        ep_done_scan(ep, &txlist);
 875        mutex_unlock(&ep->mtx);
 876        return res;
 877}
 878
 879/*
  880 * Differs from ep_eventpoll_poll() in that internal callers already hold
  881 * ep->mtx, so we need to start from depth=1, such that mutex_lock_nested()
 882 * is correctly annotated.
 883 */
 884static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
 885                                 int depth)
 886{
 887        struct file *file = epi->ffd.file;
 888        __poll_t res;
 889
 890        pt->_key = epi->event.events;
 891        if (!is_file_epoll(file))
 892                res = vfs_poll(file, pt);
 893        else
 894                res = __ep_eventpoll_poll(file, pt, depth);
 895        return res & epi->event.events;
 896}
 897
 898static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
 899{
 900        return __ep_eventpoll_poll(file, wait, 0);
 901}
 902
 903#ifdef CONFIG_PROC_FS
 904static void ep_show_fdinfo(struct seq_file *m, struct file *f)
 905{
 906        struct eventpoll *ep = f->private_data;
 907        struct rb_node *rbp;
 908
 909        mutex_lock(&ep->mtx);
 910        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
 911                struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
 912                struct inode *inode = file_inode(epi->ffd.file);
 913
 914                seq_printf(m, "tfd: %8d events: %8x data: %16llx "
 915                           " pos:%lli ino:%lx sdev:%x\n",
 916                           epi->ffd.fd, epi->event.events,
 917                           (long long)epi->event.data,
 918                           (long long)epi->ffd.file->f_pos,
 919                           inode->i_ino, inode->i_sb->s_dev);
 920                if (seq_has_overflowed(m))
 921                        break;
 922        }
 923        mutex_unlock(&ep->mtx);
 924}
 925#endif
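
/*
 * Illustration: the loop above emits one line per watched descriptor into
 * /proc/<pid>/fdinfo/<epfd>. With the format string used here, a line looks
 * roughly like (values made up):
 *
 *        tfd:        5 events:       19 data:              4bf  pos:0 ino:9f2 sdev:d
 */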
 926
 927/* File callbacks that implement the eventpoll file behaviour */
 928static const struct file_operations eventpoll_fops = {
 929#ifdef CONFIG_PROC_FS
 930        .show_fdinfo    = ep_show_fdinfo,
 931#endif
 932        .release        = ep_eventpoll_release,
 933        .poll           = ep_eventpoll_poll,
 934        .llseek         = noop_llseek,
 935};
 936
 937/*
 938 * This is called from eventpoll_release() to unlink files from the eventpoll
  939 * interface. We need this facility to correctly clean up files that are
 940 * closed without being removed from the eventpoll interface.
 941 */
 942void eventpoll_release_file(struct file *file)
 943{
 944        struct eventpoll *ep;
 945        struct epitem *epi;
 946        bool dispose;
 947
 948        /*
 949         * Use the 'dying' flag to prevent a concurrent ep_clear_and_put() from
 950         * touching the epitems list before eventpoll_release_file() can access
 951         * the ep->mtx.
 952         */
 953again:
 954        spin_lock(&file->f_lock);
 955        if (file->f_ep && file->f_ep->first) {
 956                epi = hlist_entry(file->f_ep->first, struct epitem, fllink);
 957                epi->dying = true;
 958                spin_unlock(&file->f_lock);
 959
 960                /*
 961                 * ep access is safe as we still own a reference to the ep
 962                 * struct
 963                 */
 964                ep = epi->ep;
 965                mutex_lock(&ep->mtx);
 966                dispose = __ep_remove(ep, epi, true);
 967                mutex_unlock(&ep->mtx);
 968
 969                if (dispose)
 970                        ep_free(ep);
 971                goto again;
 972        }
 973        spin_unlock(&file->f_lock);
 974}
 975
 976static int ep_alloc(struct eventpoll **pep)
 977{
 978        struct eventpoll *ep;
 979
 980        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 981        if (unlikely(!ep))
 982                return -ENOMEM;
 983
 984        mutex_init(&ep->mtx);
 985        rwlock_init(&ep->lock);
 986        init_waitqueue_head(&ep->wq);
 987        init_waitqueue_head(&ep->poll_wait);
 988        INIT_LIST_HEAD(&ep->rdllist);
 989        ep->rbr = RB_ROOT_CACHED;
 990        ep->ovflist = EP_UNACTIVE_PTR;
 991        ep->user = get_current_user();
 992        refcount_set(&ep->refcount, 1);
 993
 994        *pep = ep;
 995
 996        return 0;
 997}
 998
 999/*
1000 * Search the file inside the eventpoll tree. The RB tree operations
1001 * are protected by the "mtx" mutex, and ep_find() must be called with
1002 * "mtx" held.
1003 */
1004static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
1005{
1006        int kcmp;
1007        struct rb_node *rbp;
1008        struct epitem *epi, *epir = NULL;
1009        struct epoll_filefd ffd;
1010
1011        ep_set_ffd(&ffd, file, fd);
1012        for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
1013                epi = rb_entry(rbp, struct epitem, rbn);
1014                kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
1015                if (kcmp > 0)
1016                        rbp = rbp->rb_right;
1017                else if (kcmp < 0)
1018                        rbp = rbp->rb_left;
1019                else {
1020                        epir = epi;
1021                        break;
1022                }
1023        }
1024
1025        return epir;
1026}
1027
1028#ifdef CONFIG_KCMP
1029static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
1030{
1031        struct rb_node *rbp;
1032        struct epitem *epi;
1033
1034        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1035                epi = rb_entry(rbp, struct epitem, rbn);
1036                if (epi->ffd.fd == tfd) {
1037                        if (toff == 0)
1038                                return epi;
1039                        else
1040                                toff--;
1041                }
1042                cond_resched();
1043        }
1044
1045        return NULL;
1046}
1047
1048struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
1049                                     unsigned long toff)
1050{
1051        struct file *file_raw;
1052        struct eventpoll *ep;
1053        struct epitem *epi;
1054
1055        if (!is_file_epoll(file))
1056                return ERR_PTR(-EINVAL);
1057
1058        ep = file->private_data;
1059
1060        mutex_lock(&ep->mtx);
1061        epi = ep_find_tfd(ep, tfd, toff);
1062        if (epi)
1063                file_raw = epi->ffd.file;
1064        else
1065                file_raw = ERR_PTR(-ENOENT);
1066        mutex_unlock(&ep->mtx);
1067
1068        return file_raw;
1069}
1070#endif /* CONFIG_KCMP */
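
/*
 * Illustration (a userspace sketch, not part of this file): checkpoint/
 * restore tooling reaches the helper above through kcmp(2). Here pid1,
 * pid2, epfd, tfd and fd are placeholders:
 *
 *        #include <linux/kcmp.h>
 *        #include <sys/syscall.h>
 *
 *        struct kcmp_epoll_slot slot = { .efd = epfd, .tfd = tfd, .toff = 0 };
 *        int same = syscall(SYS_kcmp, pid1, pid2, KCMP_EPOLL_TFD,
 *                           fd, (unsigned long)&slot);
 *        // 0 means fd in pid1 and the epoll target in pid2 are the same file
 */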
1071
1072/*
1073 * Adds a new entry to the tail of the list in a lockless way, i.e.
1074 * multiple CPUs are allowed to call this function concurrently.
1075 *
1076 * Beware: it is necessary to prevent any other modifications of the
1077 *         existing list until all changes are completed, in other words
1078 *         concurrent list_add_tail_lockless() calls should be protected
1079 *         with a read lock, where write lock acts as a barrier which
1080 *         makes sure all list_add_tail_lockless() calls are fully
1081 *         completed.
1082 *
 1083 *         Also an element can be locklessly added to the list only in one
 1084 *         direction, i.e. either to the tail or to the head; otherwise
 1085 *         concurrent access will corrupt the list.
1086 *
 1087 * Return: %false if the element has already been added to the list, %true
1088 * otherwise.
1089 */
1090static inline bool list_add_tail_lockless(struct list_head *new,
1091                                          struct list_head *head)
1092{
1093        struct list_head *prev;
1094
1095        /*
 1096         * This is a simple 'new->next = head' operation, but cmpxchg()
 1097         * is used in order to detect that the same element has just been
1098         * added to the list from another CPU: the winner observes
1099         * new->next == new.
1100         */
1101        if (!try_cmpxchg(&new->next, &new, head))
1102                return false;
1103
1104        /*
1105         * Initially ->next of a new element must be updated with the head
1106         * (we are inserting to the tail) and only then pointers are atomically
1107         * exchanged.  XCHG guarantees memory ordering, thus ->next should be
1108         * updated before pointers are actually swapped and pointers are
1109         * swapped before prev->next is updated.
1110         */
1111
1112        prev = xchg(&head->prev, new);
1113
1114        /*
1115         * It is safe to modify prev->next and new->prev, because a new element
1116         * is added only to the tail and new->next is updated before XCHG.
1117         */
1118
1119        prev->next = new;
1120        new->prev = prev;
1121
1122        return true;
1123}
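
/*
 * Worked example of the scheme above, with two CPUs appending to the same
 * head concurrently (each element's ->next points to itself before the
 * call, which is what the cmpxchg() duplicate check relies on):
 *
 *        CPU0: xchg(&head->prev, A) returns old tail T; then T->next = A
 *        CPU1: xchg(&head->prev, B) returns A;          then A->next = B
 *
 * Whatever the interleaving, each xchg() hands exactly one predecessor to
 * exactly one writer, so the tail links are never written twice.
 */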
1124
1125/*
1126 * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
1127 * i.e. multiple CPUs are allowed to call this function concurrently.
1128 *
 1129 * Return: %false if the epi element has already been chained, %true otherwise.
1130 */
1131static inline bool chain_epi_lockless(struct epitem *epi)
1132{
1133        struct eventpoll *ep = epi->ep;
1134
1135        /* Fast preliminary check */
1136        if (epi->next != EP_UNACTIVE_PTR)
1137                return false;
1138
1139        /* Check that the same epi has not been just chained from another CPU */
1140        if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
1141                return false;
1142
1143        /* Atomically exchange tail */
1144        epi->next = xchg(&ep->ovflist, epi);
1145
1146        return true;
1147}
1148
1149/*
1150 * This is the callback that is passed to the wait queue wakeup
1151 * mechanism. It is called by the stored file descriptors when they
1152 * have events to report.
1153 *
1154 * This callback takes a read lock in order not to contend with concurrent
1155 * events from another file descriptor, thus all modifications to ->rdllist
1156 * or ->ovflist are lockless.  Read lock is paired with the write lock from
1157 * ep_scan_ready_list(), which stops all list modifications and guarantees
1158 * that lists state is seen correctly.
1159 *
 1160 * Another thing worth mentioning is that ep_poll_callback() can be called
 1161 * concurrently for the same @epi from different CPUs if the poll table was
 1162 * initialized with several wait queue entries.  Wakeups from different CPUs
 1163 * on a single wait queue are serialized by wq.lock, but the case when
 1164 * multiple wait queues are used must be detected separately.  This is done
 1165 * using a cmpxchg() operation.
1166 */
1167static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
1168{
1169        int pwake = 0;
1170        struct epitem *epi = ep_item_from_wait(wait);
1171        struct eventpoll *ep = epi->ep;
1172        __poll_t pollflags = key_to_poll(key);
1173        unsigned long flags;
1174        int ewake = 0;
1175
1176        read_lock_irqsave(&ep->lock, flags);
1177
1178        ep_set_busy_poll_napi_id(epi);
1179
1180        /*
1181         * If the event mask does not contain any poll(2) event, we consider the
1182         * descriptor to be disabled. This condition is likely the effect of the
1183         * EPOLLONESHOT bit that disables the descriptor when an event is received,
1184         * until the next EPOLL_CTL_MOD will be issued.
 1185         * until the next EPOLL_CTL_MOD is issued.
1186        if (!(epi->event.events & ~EP_PRIVATE_BITS))
1187                goto out_unlock;
1188
1189        /*
1190         * Check the events coming with the callback. At this stage, not
1191         * every device reports the events in the "key" parameter of the
1192         * callback. We need to be able to handle both cases here, hence the
1193         * test for "key" != NULL before the event match test.
1194         */
1195        if (pollflags && !(pollflags & epi->event.events))
1196                goto out_unlock;
1197
1198        /*
1199         * If we are transferring events to userspace, we can hold no locks
1200         * (because we're accessing user memory, and because of linux f_op->poll()
1201         * semantics). All the events that happen during that period of time are
1202         * chained in ep->ovflist and requeued later on.
1203         */
1204        if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
1205                if (chain_epi_lockless(epi))
1206                        ep_pm_stay_awake_rcu(epi);
1207        } else if (!ep_is_linked(epi)) {
1208                /* In the usual case, add event to ready list. */
1209                if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
1210                        ep_pm_stay_awake_rcu(epi);
1211        }
1212
1213        /*
 1214         * Wake up (if active) both the eventpoll wait list and the ->poll()
1215         * wait list.
1216         */
1217        if (waitqueue_active(&ep->wq)) {
1218                if ((epi->event.events & EPOLLEXCLUSIVE) &&
1219                                        !(pollflags & POLLFREE)) {
1220                        switch (pollflags & EPOLLINOUT_BITS) {
1221                        case EPOLLIN:
1222                                if (epi->event.events & EPOLLIN)
1223                                        ewake = 1;
1224                                break;
1225                        case EPOLLOUT:
1226                                if (epi->event.events & EPOLLOUT)
1227                                        ewake = 1;
1228                                break;
1229                        case 0:
1230                                ewake = 1;
1231                                break;
1232                        }
1233                }
1234                wake_up(&ep->wq);
1235        }
1236        if (waitqueue_active(&ep->poll_wait))
1237                pwake++;
1238
1239out_unlock:
1240        read_unlock_irqrestore(&ep->lock, flags);
1241
1242        /* We have to call this outside the lock */
1243        if (pwake)
1244                ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);
1245
1246        if (!(epi->event.events & EPOLLEXCLUSIVE))
1247                ewake = 1;
1248
1249        if (pollflags & POLLFREE) {
1250                /*
1251                 * If we race with ep_remove_wait_queue() it can miss
1252                 * ->whead = NULL and do another remove_wait_queue() after
1253                 * us, so we can't use __remove_wait_queue().
1254                 */
1255                list_del_init(&wait->entry);
1256                /*
1257                 * ->whead != NULL protects us from the race with
1258                 * ep_clear_and_put() or ep_remove(), ep_remove_wait_queue()
1259                 * takes whead->lock held by the caller. Once we nullify it,
1260                 * nothing protects ep/epi or even wait.
1261                 */
1262                smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
1263        }
1264
1265        return ewake;
1266}
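
/*
 * Illustration (a userspace sketch, not part of this file) of the
 * EPOLLEXCLUSIVE branch above: several workers each watch the same
 * listening socket through their own epoll fd, and one incoming
 * connection wakes at least one waiter rather than all of them:
 *
 *        struct epoll_event ev = {
 *                .events = EPOLLIN | EPOLLEXCLUSIVE,
 *                .data.fd = listen_fd,
 *        };
 *        epoll_ctl(worker_epfd, EPOLL_CTL_ADD, listen_fd, &ev);
 */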
1267
1268/*
1269 * This is the callback that is used to add our wait queue to the
1270 * target file wakeup lists.
1271 */
1272static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
1273                                 poll_table *pt)
1274{
1275        struct ep_pqueue *epq = container_of(pt, struct ep_pqueue, pt);
1276        struct epitem *epi = epq->epi;
1277        struct eppoll_entry *pwq;
1278
1279        if (unlikely(!epi))     // an earlier allocation has failed
1280                return;
1281
1282        pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL);
1283        if (unlikely(!pwq)) {
1284                epq->epi = NULL;
1285                return;
1286        }
1287
1288        init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
1289        pwq->whead = whead;
1290        pwq->base = epi;
1291        if (epi->event.events & EPOLLEXCLUSIVE)
1292                add_wait_queue_exclusive(whead, &pwq->wait);
1293        else
1294                add_wait_queue(whead, &pwq->wait);
1295        pwq->next = epi->pwqlist;
1296        epi->pwqlist = pwq;
1297}
1298
1299static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
1300{
1301        int kcmp;
1302        struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
1303        struct epitem *epic;
1304        bool leftmost = true;
1305
1306        while (*p) {
1307                parent = *p;
1308                epic = rb_entry(parent, struct epitem, rbn);
1309                kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
1310                if (kcmp > 0) {
1311                        p = &parent->rb_right;
1312                        leftmost = false;
1313                } else
1314                        p = &parent->rb_left;
1315        }
1316        rb_link_node(&epi->rbn, parent, p);
1317        rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
1318}
1319
1320
1321
1322#define PATH_ARR_SIZE 5
1323/*
 1324 * These are the numbers of paths of length 1 to 5 that we allow to emanate
 1325 * from a single file of interest. For example, we allow 1000 paths of length
 1326 * 1 to emanate from each file of interest. This essentially represents the
1327 * potential wakeup paths, which need to be limited in order to avoid massive
1328 * uncontrolled wakeup storms. The common use case should be a single ep which
1329 * is connected to n file sources. In this case each file source has 1 path
1330 * of length 1. Thus, the numbers below should be more than sufficient. These
1331 * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
1332 * and delete can't add additional paths. Protected by the epnested_mutex.
1333 */
1334static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
1335static int path_count[PATH_ARR_SIZE];
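
/*
 * Worked example of the limits above: a socket watched directly by N epoll
 * fds contributes N paths of length 1, which path_count_inc() never limits.
 * If those epoll fds are themselves watched by further epoll fds, the
 * indirect wakeup paths of length 2..5 are capped at 500, 100, 50 and 10
 * respectively; an EPOLL_CTL_ADD whose new links would exceed a cap fails
 * reverse_path_check() and is rejected.
 */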
1336
1337static int path_count_inc(int nests)
1338{
1339        /* Allow an arbitrary number of depth 1 paths */
1340        if (nests == 0)
1341                return 0;
1342
1343        if (++path_count[nests] > path_limits[nests])
1344                return -1;
1345        return 0;
1346}
1347
1348static void path_count_init(void)
1349{
1350        int i;
1351
1352        for (i = 0; i < PATH_ARR_SIZE; i++)
1353                path_count[i] = 0;
1354}
1355
1356static int reverse_path_check_proc(struct hlist_head *refs, int depth)
1357{
1358        int error = 0;
1359        struct epitem *epi;
1360
1361        if (depth > EP_MAX_NESTS) /* too deep nesting */
1362                return -1;
1363
1364        /* CTL_DEL can remove links here, but that can't increase our count */
1365        hlist_for_each_entry_rcu(epi, refs, fllink) {
1366                struct hlist_head *refs = &epi->ep->refs;
1367                if (hlist_empty(refs))
1368                        error = path_count_inc(depth);
1369                else
1370                        error = reverse_path_check_proc(refs, depth + 1);
1371                if (error != 0)
1372                        break;
1373        }
1374        return error;
1375}
1376
1377/**
 1378 * reverse_path_check - The tfile_check_list is a list of epitems_head, which have
1379 *                      links that are proposed to be newly added. We need to
1380 *                      make sure that those added links don't add too many
1381 *                      paths such that we will spend all our time waking up
1382 *                      eventpoll objects.
1383 *
1384 * Return: %zero if the proposed links don't create too many paths,
1385 *          %-1 otherwise.
1386 */
1387static int reverse_path_check(void)
1388{
1389        struct epitems_head *p;
1390
1391        for (p = tfile_check_list; p != EP_UNACTIVE_PTR; p = p->next) {
1392                int error;
1393                path_count_init();
1394                rcu_read_lock();
1395                error = reverse_path_check_proc(&p->epitems, 0);
1396                rcu_read_unlock();
1397                if (error)
1398                        return error;
1399        }
1400        return 0;
1401}
1402
1403static int ep_create_wakeup_source(struct epitem *epi)
1404{
1405        struct name_snapshot n;
1406        struct wakeup_source *ws;
1407
1408        if (!epi->ep->ws) {
1409                epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
1410                if (!epi->ep->ws)
1411                        return -ENOMEM;
1412        }
1413
1414        take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
1415        ws = wakeup_source_register(NULL, n.name.name);
1416        release_dentry_name_snapshot(&n);
1417
1418        if (!ws)
1419                return -ENOMEM;
1420        rcu_assign_pointer(epi->ws, ws);
1421
1422        return 0;
1423}
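
/*
 * Illustration (a userspace sketch, not part of this file): EPOLLWAKEUP
 * requests a wakeup source like the one created above, so a pending event
 * holds off system suspend until it has been processed. The caller needs
 * CAP_BLOCK_SUSPEND; without it the kernel silently clears the flag:
 *
 *        struct epoll_event ev = {
 *                .events = EPOLLIN | EPOLLWAKEUP,
 *                .data.fd = timer_fd,
 *        };
 *        epoll_ctl(epfd, EPOLL_CTL_ADD, timer_fd, &ev);
 */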
1424
1425/* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
1426static noinline void ep_destroy_wakeup_source(struct epitem *epi)
1427{
1428        struct wakeup_source *ws = ep_wakeup_source(epi);
1429
1430        RCU_INIT_POINTER(epi->ws, NULL);
1431
1432        /*
 1433         * Wait for ep_pm_stay_awake_rcu() to finish; synchronize_rcu() is
 1434         * used internally by wakeup_source_remove() too (called by
 1435         * wakeup_source_unregister()), so we cannot use call_rcu().
1436         */
1437        synchronize_rcu();
1438        wakeup_source_unregister(ws);
1439}
1440
1441static int attach_epitem(struct file *file, struct epitem *epi)
1442{
1443        struct epitems_head *to_free = NULL;
1444        struct hlist_head *head = NULL;
1445        struct eventpoll *ep = NULL;
1446
1447        if (is_file_epoll(file))
1448                ep = file->private_data;
1449
1450        if (ep) {
1451                head = &ep->refs;
1452        } else if (!READ_ONCE(file->f_ep)) {
1453allocate:
1454                to_free = kmem_cache_zalloc(ephead_cache, GFP_KERNEL);
1455                if (!to_free)
1456                        return -ENOMEM;
1457                head = &to_free->epitems;
1458        }
1459        spin_lock(&file->f_lock);
1460        if (!file->f_ep) {
1461                if (unlikely(!head)) {
1462                        spin_unlock(&file->f_lock);
1463                        goto allocate;
1464                }
1465                file->f_ep = head;
1466                to_free = NULL;
1467        }
1468        hlist_add_head_rcu(&epi->fllink, file->f_ep);
1469        spin_unlock(&file->f_lock);
1470        free_ephead(to_free);
1471        return 0;
1472}
1473
1474/*
1475 * Must be called with "mtx" held.
1476 */
1477static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
1478                     struct file *tfile, int fd, int full_check)
1479{
1480        int error, pwake = 0;
1481        __poll_t revents;
1482        struct epitem *epi;
1483        struct ep_pqueue epq;
1484        struct eventpoll *tep = NULL;
1485
1486        if (is_file_epoll(tfile))
1487                tep = tfile->private_data;
1488
1489        lockdep_assert_irqs_enabled();
1490
1491        if (unlikely(percpu_counter_compare(&ep->user->epoll_watches,
1492                                            max_user_watches) >= 0))
1493                return -ENOSPC;
1494        percpu_counter_inc(&ep->user->epoll_watches);
1495
1496        if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) {
1497                percpu_counter_dec(&ep->user->epoll_watches);
1498                return -ENOMEM;
1499        }
1500
 1501        /* Item initialization follows here ... */
1502        INIT_LIST_HEAD(&epi->rdllink);
1503        epi->ep = ep;
1504        ep_set_ffd(&epi->ffd, tfile, fd);
1505        epi->event = *event;
1506        epi->next = EP_UNACTIVE_PTR;
1507
1508        if (tep)
1509                mutex_lock_nested(&tep->mtx, 1);
 1510        /* Add the current item to the list of active epoll hooks for this file */
1511        if (unlikely(attach_epitem(tfile, epi) < 0)) {
1512                if (tep)
1513                        mutex_unlock(&tep->mtx);
1514                kmem_cache_free(epi_cache, epi);
1515                percpu_counter_dec(&ep->user->epoll_watches);
1516                return -ENOMEM;
1517        }
1518
1519        if (full_check && !tep)
1520                list_file(tfile);
1521
1522        /*
1523         * Add the current item to the RB tree. All RB tree operations are
1524         * protected by "mtx", and ep_insert() is called with "mtx" held.
1525         */
1526        ep_rbtree_insert(ep, epi);
1527        if (tep)
1528                mutex_unlock(&tep->mtx);
1529
1530        /*
1531         * ep_remove_safe() calls in the later error paths can't lead to
1532         * ep_free() as the ep file itself still holds an ep reference.
1533         */
1534        ep_get(ep);
1535
1536        /* now check if we've created too many backpaths */
1537        if (unlikely(full_check && reverse_path_check())) {
1538                ep_remove_safe(ep, epi);
1539                return -EINVAL;
1540        }
1541
1542        if (epi->event.events & EPOLLWAKEUP) {
1543                error = ep_create_wakeup_source(epi);
1544                if (error) {
1545                        ep_remove_safe(ep, epi);
1546                        return error;
1547                }
1548        }
1549
1550        /* Initialize the poll table using the queue callback */
1551        epq.epi = epi;
1552        init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
1553
1554        /*
1555         * Attach the item to the poll hooks and get current event bits.
1556         * We can safely use the file* here because its usage count has
1557         * been increased by the caller of this function. Note that after
1558         * this operation completes, the poll callback can start hitting
1559         * the new item.
1560         */
1561        revents = ep_item_poll(epi, &epq.pt, 1);
1562
1563        /*
1564         * We have to check if something went wrong during the poll wait queue
1565         * install process. Namely, an allocation for a wait queue failed
1566         * due to high memory pressure.
1567         */
1568        if (unlikely(!epq.epi)) {
1569                ep_remove_safe(ep, epi);
1570                return -ENOMEM;
1571        }
1572
1573        /* We have to drop the new item inside our item list to keep track of it */
1574        write_lock_irq(&ep->lock);
1575
1576        /* record NAPI ID of new item if present */
1577        ep_set_busy_poll_napi_id(epi);
1578
1579        /* If the file is already "ready" we drop it inside the ready list */
1580        if (revents && !ep_is_linked(epi)) {
1581                list_add_tail(&epi->rdllink, &ep->rdllist);
1582                ep_pm_stay_awake(epi);
1583
1584                /* Notify waiting tasks that events are available */
1585                if (waitqueue_active(&ep->wq))
1586                        wake_up(&ep->wq);
1587                if (waitqueue_active(&ep->poll_wait))
1588                        pwake++;
1589        }
1590
1591        write_unlock_irq(&ep->lock);
1592
1593        /* We have to call this outside the lock */
1594        if (pwake)
1595                ep_poll_safewake(ep, NULL, 0);
1596
1597        return 0;
1598}
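
/*
 * Illustrative sketch (not part of this file): the general shape of an
 * f_op->poll method that the ep_item_poll() call above ends up invoking.
 * The poll_wait() call is what fires ep_ptable_queue_proc() through
 * epq.pt, hooking the new epitem onto the target file's wait queue.
 * "mydev", its fields and mydev_data_ready() are hypothetical names
 * used only for the example.
 *
 *	static __poll_t mydev_poll(struct file *file, poll_table *pt)
 *	{
 *		struct mydev *dev = file->private_data;
 *		__poll_t mask = 0;
 *
 *		poll_wait(file, &dev->waitq, pt); // invokes the queue proc
 *		if (mydev_data_ready(dev))
 *			mask |= EPOLLIN | EPOLLRDNORM;
 *		return mask;
 *	}
 */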
1599
1600/*
1601 * Modify the interest event mask by dropping an event if the new mask
1602 * has a match in the current file status. Must be called with "mtx" held.
1603 */
1604static int ep_modify(struct eventpoll *ep, struct epitem *epi,
1605                     const struct epoll_event *event)
1606{
1607        int pwake = 0;
1608        poll_table pt;
1609
1610        lockdep_assert_irqs_enabled();
1611
1612        init_poll_funcptr(&pt, NULL);
1613
1614        /*
1615         * Set the new event interest mask before calling f_op->poll();
1616         * otherwise we might miss an event that happens between the
1617         * f_op->poll() call and the new event set registering.
1618         */
1619        epi->event.events = event->events; /* need barrier below */
1620        epi->event.data = event->data; /* protected by mtx */
1621        if (epi->event.events & EPOLLWAKEUP) {
1622                if (!ep_has_wakeup_source(epi))
1623                        ep_create_wakeup_source(epi);
1624        } else if (ep_has_wakeup_source(epi)) {
1625                ep_destroy_wakeup_source(epi);
1626        }
1627
1628        /*
1629         * The following barrier has two effects:
1630         *
1631         * 1) Flush epi changes above to other CPUs.  This ensures
1632         *    we do not miss events from ep_poll_callback if an
1633         *    event occurs immediately after we call f_op->poll().
1634         *    We need this because we did not take ep->lock while
1635         *    changing epi above (but ep_poll_callback does take
1636         *    ep->lock).
1637         *
1638         * 2) We also need to ensure we do not miss _past_ events
1639         *    when calling f_op->poll().  This barrier also
1640         *    pairs with the barrier in wq_has_sleeper (see
1641         *    comments for wq_has_sleeper).
1642         *
1643         * This barrier will now guarantee ep_poll_callback or f_op->poll
1644         * (or both) will notice the readiness of an item.
1645         */
1646        smp_mb();
1647
1648        /*
1649         * Get current event bits. We can safely use the file* here because
1650         * its usage count has been increased by the caller of this function.
1651         * If the item is "hot" and it is not registered inside the ready
1652         * list, push it inside.
1653         */
1654        if (ep_item_poll(epi, &pt, 1)) {
1655                write_lock_irq(&ep->lock);
1656                if (!ep_is_linked(epi)) {
1657                        list_add_tail(&epi->rdllink, &ep->rdllist);
1658                        ep_pm_stay_awake(epi);
1659
1660                        /* Notify waiting tasks that events are available */
1661                        if (waitqueue_active(&ep->wq))
1662                                wake_up(&ep->wq);
1663                        if (waitqueue_active(&ep->poll_wait))
1664                                pwake++;
1665                }
1666                write_unlock_irq(&ep->lock);
1667        }
1668
1669        /* We have to call this outside the lock */
1670        if (pwake)
1671                ep_poll_safewake(ep, NULL, 0);
1672
1673        return 0;
1674}
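
/*
 * Illustrative userspace sketch (not kernel code): ep_modify() is what
 * services EPOLL_CTL_MOD, so re-arming a descriptor that was registered
 * with EPOLLONESHOT after its event has been consumed looks like this.
 * "epfd" and "fd" are assumed to come from earlier epoll_create1() and
 * accept()/open() calls.
 *
 *	struct epoll_event ev = {
 *		.events = EPOLLIN | EPOLLONESHOT, // re-arm for the next event
 *		.data.fd = fd,
 *	};
 *	if (epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev) < 0)
 *		perror("epoll_ctl(EPOLL_CTL_MOD)");
 */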
1675
1676static int ep_send_events(struct eventpoll *ep,
1677                          struct epoll_event __user *events, int maxevents)
1678{
1679        struct epitem *epi, *tmp;
1680        LIST_HEAD(txlist);
1681        poll_table pt;
1682        int res = 0;
1683
1684        /*
1685         * Always short-circuit for fatal signals to allow threads to make a
1686         * timely exit without the chance of finding more events available and
1687         * fetching repeatedly.
1688         */
1689        if (fatal_signal_pending(current))
1690                return -EINTR;
1691
1692        init_poll_funcptr(&pt, NULL);
1693
1694        mutex_lock(&ep->mtx);
1695        ep_start_scan(ep, &txlist);
1696
1697        /*
1698         * We can loop without lock because we are passed a task private list.
1699         * Items cannot vanish during the loop because we are holding ep->mtx.
1700         */
1701        list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
1702                struct wakeup_source *ws;
1703                __poll_t revents;
1704
1705                if (res >= maxevents)
1706                        break;
1707
1708                /*
1709                 * Activate ep->ws before deactivating epi->ws to prevent
1710                 * triggering auto-suspend here (in case we reactivate epi->ws
1711                 * below).
1712                 *
1713                 * This could be rearranged to delay the deactivation of epi->ws
1714                 * instead, but then epi->ws would temporarily be out of sync
1715                 * with ep_is_linked().
1716                 */
1717                ws = ep_wakeup_source(epi);
1718                if (ws) {
1719                        if (ws->active)
1720                                __pm_stay_awake(ep->ws);
1721                        __pm_relax(ws);
1722                }
1723
1724                list_del_init(&epi->rdllink);
1725
1726                /*
1727                 * If the event mask intersects the caller-requested one,
1728                 * deliver the event to userspace. Again, we are holding ep->mtx,
1729                 * so no operations coming from userspace can change the item.
1730                 */
1731                revents = ep_item_poll(epi, &pt, 1);
1732                if (!revents)
1733                        continue;
1734
1735                events = epoll_put_uevent(revents, epi->event.data, events);
1736                if (!events) {
1737                        list_add(&epi->rdllink, &txlist);
1738                        ep_pm_stay_awake(epi);
1739                        if (!res)
1740                                res = -EFAULT;
1741                        break;
1742                }
1743                res++;
1744                if (epi->event.events & EPOLLONESHOT)
1745                        epi->event.events &= EP_PRIVATE_BITS;
1746                else if (!(epi->event.events & EPOLLET)) {
1747                        /*
1748                         * If this file has been added in Level
1749                         * Trigger mode, we need to insert it back
1750                         * into the ready list, so that the next
1751                         * call to epoll_wait() will check the
1752                         * events' availability again. At this point,
1753                         * no one can insert into ep->rdllist besides
1754                         * us. The epoll_ctl() callers are locked out
1755                         * by ep_send_events() holding "mtx" and the
1756                         * poll callback will queue them in ep->ovflist.
1757                         */
1758                        list_add_tail(&epi->rdllink, &ep->rdllist);
1759                        ep_pm_stay_awake(epi);
1760                }
1761        }
1762        ep_done_scan(ep, &txlist);
1763        mutex_unlock(&ep->mtx);
1764
1765        return res;
1766}
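
/*
 * Illustrative userspace sketch (not kernel code): the level-triggered
 * re-queueing above is why an EPOLLET consumer, whose item is *not* put
 * back on the ready list, must drain the descriptor until EAGAIN before
 * waiting again. "fd", "buf" and consume() are hypothetical.
 *
 *	for (;;) {
 *		ssize_t n = read(fd, buf, sizeof(buf));
 *		if (n < 0 && errno == EAGAIN)
 *			break;			// edge fully consumed
 *		if (n <= 0)
 *			break;			// error or EOF
 *		consume(buf, n);
 *	}
 */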
1767
1768static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
1769{
1770        struct timespec64 now;
1771
1772        if (ms < 0)
1773                return NULL;
1774
1775        if (!ms) {
1776                to->tv_sec = 0;
1777                to->tv_nsec = 0;
1778                return to;
1779        }
1780
1781        to->tv_sec = ms / MSEC_PER_SEC;
1782        to->tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC);
1783
1784        ktime_get_ts64(&now);
1785        *to = timespec64_add_safe(now, *to);
1786        return to;
1787}
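
/*
 * Worked example of the conversion above (values chosen for
 * illustration): ms == 1250 yields tv_sec = 1 and tv_nsec =
 * 250 * NSEC_PER_MSEC, which timespec64_add_safe() then turns into an
 * absolute CLOCK_MONOTONIC expiry. ms < 0 maps to a NULL return,
 * i.e. "block forever", and ms == 0 to a zero timespec, i.e. "do not
 * block at all".
 */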
1788
1789/*
1790 * autoremove_wake_function, but remove even on failure to wake up, because we
1791 * know that default_wake_function/ttwu will only fail if the thread is already
1792 * woken, and in that case the ep_poll loop will remove the entry anyway, not
1793 * try to reuse it.
1794 */
1795static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
1796                                       unsigned int mode, int sync, void *key)
1797{
1798        int ret = default_wake_function(wq_entry, mode, sync, key);
1799
1800        /*
1801         * Pairs with list_empty_careful in ep_poll, and ensures future loop
1802         * iterations see the cause of this wakeup.
1803         */
1804        list_del_init_careful(&wq_entry->entry);
1805        return ret;
1806}
1807
1808/**
1809 * ep_poll - Retrieves ready events, and delivers them to the caller-supplied
1810 *           event buffer.
1811 *
1812 * @ep: Pointer to the eventpoll context.
1813 * @events: Pointer to the userspace buffer where the ready events should be
1814 *          stored.
1815 * @maxevents: Size (in terms of number of events) of the caller event buffer.
1816 * @timeout: Maximum timeout for the ready events fetch operation, in
1817 *           timespec. If the timeout is zero, the function will not block,
1818 *           while if the @timeout ptr is NULL, the function will block
1819 *           until at least one event has been retrieved (or an error
1820 *           occurred).
1821 *
1822 * Return: the number of ready events which have been fetched, or an
1823 *          error code, in case of error.
1824 */
1825static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1826                   int maxevents, struct timespec64 *timeout)
1827{
1828        int res, eavail, timed_out = 0;
1829        u64 slack = 0;
1830        wait_queue_entry_t wait;
1831        ktime_t expires, *to = NULL;
1832
1833        lockdep_assert_irqs_enabled();
1834
1835        if (timeout && (timeout->tv_sec | timeout->tv_nsec)) {
1836                slack = select_estimate_accuracy(timeout);
1837                to = &expires;
1838                *to = timespec64_to_ktime(*timeout);
1839        } else if (timeout) {
1840                /*
1841                 * Avoid the unnecessary trip to the wait queue loop if the
1842                 * caller specified a non-blocking operation.
1843                 */
1844                timed_out = 1;
1845        }
1846
1847        /*
1848         * This call is racy: We may or may not see events that are being added
1849         * to the ready list under the lock (e.g., in IRQ callbacks). For cases
1850         * with a non-zero timeout, this thread will check the ready list under
1851         * lock and will add to the wait queue.  For cases with a zero
1852         * timeout, the user by definition should not care and will have to
1853         * recheck again.
1854         */
1855        eavail = ep_events_available(ep);
1856
1857        while (1) {
1858                if (eavail) {
1859                        /*
1860                         * Try to transfer events to user space. If we get
1861                         * 0 events and there is still timeout left over, we
1862                         * go back and try again.
1863                         */
1864                        res = ep_send_events(ep, events, maxevents);
1865                        if (res)
1866                                return res;
1867                }
1868
1869                if (timed_out)
1870                        return 0;
1871
1872                eavail = ep_busy_loop(ep, timed_out);
1873                if (eavail)
1874                        continue;
1875
1876                if (signal_pending(current))
1877                        return -EINTR;
1878
1879                /*
1880                 * Internally init_wait() uses autoremove_wake_function(),
1881                 * thus the wait entry is removed from the wait queue on
1882                 * each wakeup. Why is this important? With several waiters,
1883                 * each new wakeup will then hit the next waiter, giving it
1884                 * the chance to harvest new events; otherwise a wakeup can
1885                 * be lost. This is also good performance-wise: on the
1886                 * normal wakeup path there is no need to call
1887                 * __remove_wait_queue() explicitly, so ep->lock (which
1888                 * would stall event delivery) is not taken.
1889                 *
1890                 * In fact, we now use an even more aggressive function that
1891                 * unconditionally removes, because we don't reuse the wait
1892                 * entry between loop iterations. This lets us also avoid the
1893                 * performance issue if a process is killed, causing all of its
1894                 * threads to wake up without being removed normally.
1895                 */
1896                init_wait(&wait);
1897                wait.func = ep_autoremove_wake_function;
1898
1899                write_lock_irq(&ep->lock);
1900                /*
1901                 * Barrierless variant: waitqueue_active() is called under
1902                 * the same lock on the wakeup (ep_poll_callback()) side, so
1903                 * it is safe to avoid an explicit barrier.
1904                 */
1905                __set_current_state(TASK_INTERRUPTIBLE);
1906
1907                /*
1908                 * Do the final check under the lock. ep_start/done_scan()
1909                 * plays with two lists (->rdllist and ->ovflist) and there
1910                 * is always a race when both lists are empty for a short
1911                 * period of time although events are pending, so the lock
1912                 * is important.
1913                 */
1914                eavail = ep_events_available(ep);
1915                if (!eavail)
1916                        __add_wait_queue_exclusive(&ep->wq, &wait);
1917
1918                write_unlock_irq(&ep->lock);
1919
1920                if (!eavail)
1921                        timed_out = !schedule_hrtimeout_range(to, slack,
1922                                                              HRTIMER_MODE_ABS);
1923                __set_current_state(TASK_RUNNING);
1924
1925                /*
1926                 * We were woken up, thus go and try to harvest some events.
1927                 * If timed out and still on the wait queue, recheck eavail
1928                 * carefully under lock, below.
1929                 */
1930                eavail = 1;
1931
1932                if (!list_empty_careful(&wait.entry)) {
1933                        write_lock_irq(&ep->lock);
1934                        /*
1935                         * If the thread timed out and is not on the wait queue,
1936                         * it means that the thread was woken up after its
1937                         * timeout expired before it could reacquire the lock.
1938                         * Thus, when wait.entry is empty, it needs to harvest
1939                         * events.
1940                         */
1941                        if (timed_out)
1942                                eavail = list_empty(&wait.entry);
1943                        __remove_wait_queue(&ep->wq, &wait);
1944                        write_unlock_irq(&ep->lock);
1945                }
1946        }
1947}
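
/*
 * Illustrative userspace sketch (not kernel code): a minimal loop over
 * the ep_poll() semantics documented above -- a timeout of -1 blocks
 * until at least one event arrives, 0 polls without blocking. "epfd"
 * and handle_event() are hypothetical.
 *
 *	struct epoll_event evs[64];
 *
 *	for (;;) {
 *		int n = epoll_wait(epfd, evs, 64, -1);
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;	// interrupted, just retry
 *			break;			// real error
 *		}
 *		for (int i = 0; i < n; i++)
 *			handle_event(&evs[i]);
 *	}
 */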
1948
1949/**
1950 * ep_loop_check_proc - verify that adding an epoll file inside another
1951 *                      epoll structure does not violate the constraints, in
1952 *                      epoll structure does not violate the constraints in
1953 *                      terms of closed loops or too-deep chains (which can
1954 *
1955 * @ep: the &struct eventpoll to be currently checked.
1956 * @depth: Current depth of the path being checked.
1957 *
1958 * Return: %zero if adding an epoll file inside the current epoll
1959 *          structure @ep does not violate the constraints, or %-1 otherwise.
1960 */
1961static int ep_loop_check_proc(struct eventpoll *ep, int depth)
1962{
1963        int error = 0;
1964        struct rb_node *rbp;
1965        struct epitem *epi;
1966
1967        mutex_lock_nested(&ep->mtx, depth + 1);
1968        ep->gen = loop_check_gen;
1969        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1970                epi = rb_entry(rbp, struct epitem, rbn);
1971                if (unlikely(is_file_epoll(epi->ffd.file))) {
1972                        struct eventpoll *ep_tovisit;
1973                        ep_tovisit = epi->ffd.file->private_data;
1974                        if (ep_tovisit->gen == loop_check_gen)
1975                                continue;
1976                        if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
1977                                error = -1;
1978                        else
1979                                error = ep_loop_check_proc(ep_tovisit, depth + 1);
1980                        if (error != 0)
1981                                break;
1982                } else {
1983                        /*
1984                         * If we've reached a file that is not associated with
1985                         * an ep, then we need to check if the newly added
1986                         * links are going to add too many wakeup paths. We do
1987                         * this by adding it to the tfile_check_list, if it's
1988                         * not already there, and calling reverse_path_check()
1989                         * during ep_insert().
1990                         */
1991                        list_file(epi->ffd.file);
1992                }
1993        }
1994        mutex_unlock(&ep->mtx);
1995
1996        return error;
1997}
1998
1999/**
2000 * ep_loop_check - Performs a check to verify that adding an epoll file (@to)
2001 *                 into another epoll file (represented by @ep) does not create
2002 *                 closed loops or too deep chains.
2003 *
2004 * @ep: Pointer to the epoll we are inserting into.
2005 * @to: Pointer to the epoll to be inserted.
2006 *
2007 * Return: %zero if adding the epoll @to inside the epoll @ep
2008 * does not violate the constraints, or %-1 otherwise.
2009 */
2010static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
2011{
2012        inserting_into = ep;
2013        return ep_loop_check_proc(to, 0);
2014}
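
/*
 * Illustrative userspace sketch (not kernel code): the walk above is
 * what makes the second call below fail, since e1 -> e2 -> e1 would be
 * a closed loop; error handling is omitted for brevity.
 *
 *	int e1 = epoll_create1(0);
 *	int e2 = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(e1, EPOLL_CTL_ADD, e2, &ev);	// succeeds
 *	epoll_ctl(e2, EPOLL_CTL_ADD, e1, &ev);	// fails, errno == ELOOP
 */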
2015
2016static void clear_tfile_check_list(void)
2017{
2018        rcu_read_lock();
2019        while (tfile_check_list != EP_UNACTIVE_PTR) {
2020                struct epitems_head *head = tfile_check_list;
2021                tfile_check_list = head->next;
2022                unlist_file(head);
2023        }
2024        rcu_read_unlock();
2025}
2026
2027/*
2028 * Open an eventpoll file descriptor.
2029 */
2030static int do_epoll_create(int flags)
2031{
2032        int error, fd;
2033        struct eventpoll *ep = NULL;
2034        struct file *file;
2035
2036        /* Check the EPOLL_* constant for consistency.  */
2037        BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
2038
2039        if (flags & ~EPOLL_CLOEXEC)
2040                return -EINVAL;
2041        /*
2042         * Create the internal data structure ("struct eventpoll").
2043         */
2044        error = ep_alloc(&ep);
2045        if (error < 0)
2046                return error;
2047        /*
2048         * Creates all the items needed to set up an eventpoll file. That is,
2049         * a file structure and a free file descriptor.
2050         */
2051        fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
2052        if (fd < 0) {
2053                error = fd;
2054                goto out_free_ep;
2055        }
2056        file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
2057                                 O_RDWR | (flags & O_CLOEXEC));
2058        if (IS_ERR(file)) {
2059                error = PTR_ERR(file);
2060                goto out_free_fd;
2061        }
2062        ep->file = file;
2063        fd_install(fd, file);
2064        return fd;
2065
2066out_free_fd:
2067        put_unused_fd(fd);
2068out_free_ep:
2069        ep_clear_and_put(ep);
2070        return error;
2071}
2072
2073SYSCALL_DEFINE1(epoll_create1, int, flags)
2074{
2075        return do_epoll_create(flags);
2076}
2077
2078SYSCALL_DEFINE1(epoll_create, int, size)
2079{
2080        if (size <= 0)
2081                return -EINVAL;
2082
2083        return do_epoll_create(0);
2084}
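
/*
 * Illustrative userspace sketch (not kernel code): as the two syscalls
 * above show, the historical "size" argument is only sanity-checked and
 * otherwise ignored, so new code typically prefers epoll_create1() for
 * its flags argument:
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	if (epfd < 0)
 *		perror("epoll_create1");
 */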
2085
2086#ifdef CONFIG_PM_SLEEP
2087static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
2088{
2089        if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
2090                epev->events &= ~EPOLLWAKEUP;
2091}
2092#else
2093static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
2094{
2095        epev->events &= ~EPOLLWAKEUP;
2096}
2097#endif
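
/*
 * Illustrative userspace sketch (not kernel code): because of the
 * helper above, requesting EPOLLWAKEUP without CAP_BLOCK_SUSPEND does
 * not fail -- the flag is silently cleared -- so callers relying on
 * suspend-blocking semantics should check the capability themselves.
 * "epfd" and "fd" are hypothetical descriptors.
 *
 *	struct epoll_event ev = {
 *		.events = EPOLLIN | EPOLLWAKEUP, // needs CAP_BLOCK_SUSPEND
 *		.data.fd = fd,
 *	};
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev); // succeeds either way
 */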
2098
2099static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
2100                                   bool nonblock)
2101{
2102        if (!nonblock) {
2103                mutex_lock_nested(mutex, depth);
2104                return 0;
2105        }
2106        if (mutex_trylock(mutex))
2107                return 0;
2108        return -EAGAIN;
2109}
2110
2111int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
2112                 bool nonblock)
2113{
2114        int error;
2115        int full_check = 0;
2116        struct fd f, tf;
2117        struct eventpoll *ep;
2118        struct epitem *epi;
2119        struct eventpoll *tep = NULL;
2120
2121        error = -EBADF;
2122        f = fdget(epfd);
2123        if (!f.file)
2124                goto error_return;
2125
2126        /* Get the "struct file *" for the target file */
2127        tf = fdget(fd);
2128        if (!tf.file)
2129                goto error_fput;
2130
2131        /* The target file descriptor must support poll */
2132        error = -EPERM;
2133        if (!file_can_poll(tf.file))
2134                goto error_tgt_fput;
2135
2136        /* Check if EPOLLWAKEUP is allowed */
2137        if (ep_op_has_event(op))
2138                ep_take_care_of_epollwakeup(epds);
2139
2140        /*
2141         * We have to check that the file structure underneath the file descriptor
2142         * the user passed to us _is_ an eventpoll file. Also, we do not permit
2143         * adding an epoll file descriptor inside itself.
2144         */
2145        error = -EINVAL;
2146        if (f.file == tf.file || !is_file_epoll(f.file))
2147                goto error_tgt_fput;
2148
2149        /*
2150         * epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
2151         * so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
2152         * Also, we do not currently support nested exclusive wakeups.
2153         */
2154        if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
2155                if (op == EPOLL_CTL_MOD)
2156                        goto error_tgt_fput;
2157                if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
2158                                (epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
2159                        goto error_tgt_fput;
2160        }
2161
2162        /*
2163         * At this point it is safe to assume that the "private_data" contains
2164         * our own data structure.
2165         */
2166        ep = f.file->private_data;
2167
2168        /*
2169         * When we insert an epoll file descriptor inside another epoll file
2170         * descriptor, there is the chance of creating closed loops, which are
2171         * better handled here than in more critical paths. While we are
2172         * checking for loops we also determine the list of files reachable
2173         * and hang them on the tfile_check_list, so we can check that we
2174         * haven't created too many possible wakeup paths.
2175         *
2176         * We do not need to take the global 'epnested_mutex' on EPOLL_CTL_ADD when
2177         * the epoll file descriptor is attaching directly to a wakeup source,
2178         * unless the epoll file descriptor is nested. The purpose of taking the
2179         * 'epnested_mutex' on add is to prevent complex topologies such as loops and
2180         * deep wakeup paths from forming in parallel through multiple
2181         * EPOLL_CTL_ADD operations.
2182         */
2183        error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2184        if (error)
2185                goto error_tgt_fput;
2186        if (op == EPOLL_CTL_ADD) {
2187                if (READ_ONCE(f.file->f_ep) || ep->gen == loop_check_gen ||
2188                    is_file_epoll(tf.file)) {
2189                        mutex_unlock(&ep->mtx);
2190                        error = epoll_mutex_lock(&epnested_mutex, 0, nonblock);
2191                        if (error)
2192                                goto error_tgt_fput;
2193                        loop_check_gen++;
2194                        full_check = 1;
2195                        if (is_file_epoll(tf.file)) {
2196                                tep = tf.file->private_data;
2197                                error = -ELOOP;
2198                                if (ep_loop_check(ep, tep) != 0)
2199                                        goto error_tgt_fput;
2200                        }
2201                        error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2202                        if (error)
2203                                goto error_tgt_fput;
2204                }
2205        }
2206
2207        /*
2208         * Try to lookup the file inside our RB tree. Since we grabbed "mtx"
2209         * above, we can be sure to be able to use the item looked up by
2210         * ep_find() till we release the mutex.
2211         */
2212        epi = ep_find(ep, tf.file, fd);
2213
2214        error = -EINVAL;
2215        switch (op) {
2216        case EPOLL_CTL_ADD:
2217                if (!epi) {
2218                        epds->events |= EPOLLERR | EPOLLHUP;
2219                        error = ep_insert(ep, epds, tf.file, fd, full_check);
2220                } else
2221                        error = -EEXIST;
2222                break;
2223        case EPOLL_CTL_DEL:
2224                if (epi) {
2225                        /*
2226                         * The eventpoll itself is still alive: the refcount
2227                         * can't go to zero here.
2228                         */
2229                        ep_remove_safe(ep, epi);
2230                        error = 0;
2231                } else {
2232                        error = -ENOENT;
2233                }
2234                break;
2235        case EPOLL_CTL_MOD:
2236                if (epi) {
2237                        if (!(epi->event.events & EPOLLEXCLUSIVE)) {
2238                                epds->events |= EPOLLERR | EPOLLHUP;
2239                                error = ep_modify(ep, epi, epds);
2240                        }
2241                } else
2242                        error = -ENOENT;
2243                break;
2244        }
2245        mutex_unlock(&ep->mtx);
2246
2247error_tgt_fput:
2248        if (full_check) {
2249                clear_tfile_check_list();
2250                loop_check_gen++;
2251                mutex_unlock(&epnested_mutex);
2252        }
2253
2254        fdput(tf);
2255error_fput:
2256        fdput(f);
2257error_return:
2258
2259        return error;
2260}
2261
2262/*
2263 * The following function implements the controller interface for
2264 * the eventpoll file that enables the insertion/removal/change of
2265 * file descriptors inside the interest set.
2266 */
2267SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2268                struct epoll_event __user *, event)
2269{
2270        struct epoll_event epds;
2271
2272        if (ep_op_has_event(op) &&
2273            copy_from_user(&epds, event, sizeof(struct epoll_event)))
2274                return -EFAULT;
2275
2276        return do_epoll_ctl(epfd, op, fd, &epds, false);
2277}
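
/*
 * Illustrative userspace sketch (not kernel code): a typical
 * registration through the syscall above. Note that EPOLLERR and
 * EPOLLHUP need not be requested; do_epoll_ctl() ORs them in
 * unconditionally. "epfd" and "sock" are hypothetical descriptors.
 *
 *	struct epoll_event ev = {
 *		.events = EPOLLIN | EPOLLET,
 *		.data.fd = sock,
 *	};
 *	if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev) < 0)
 *		perror("epoll_ctl(EPOLL_CTL_ADD)");
 */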
2278
2279/*
2280 * Implement the event wait interface for the eventpoll file. It is the kernel
2281 * part of the user space epoll_wait(2).
2282 */
2283static int do_epoll_wait(int epfd, struct epoll_event __user *events,
2284                         int maxevents, struct timespec64 *to)
2285{
2286        int error;
2287        struct fd f;
2288        struct eventpoll *ep;
2289
2290        /* The maximum number of events must be greater than zero */
2291        if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
2292                return -EINVAL;
2293
2294        /* Verify that the area passed by the user is writeable */
2295        if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
2296                return -EFAULT;
2297
2298        /* Get the "struct file *" for the eventpoll file */
2299        f = fdget(epfd);
2300        if (!f.file)
2301                return -EBADF;
2302
2303        /*
2304         * We have to check that the file structure underneath the fd
2305         * the user passed to us _is_ an eventpoll file.
2306         */
2307        error = -EINVAL;
2308        if (!is_file_epoll(f.file))
2309                goto error_fput;
2310
2311        /*
2312         * At this point it is safe to assume that the "private_data" contains
2313         * our own data structure.
2314         */
2315        ep = f.file->private_data;
2316
2317        /* Time to fish for events ... */
2318        error = ep_poll(ep, events, maxevents, to);
2319
2320error_fput:
2321        fdput(f);
2322        return error;
2323}
2324
2325SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
2326                int, maxevents, int, timeout)
2327{
2328        struct timespec64 to;
2329
2330        return do_epoll_wait(epfd, events, maxevents,
2331                             ep_timeout_to_timespec(&to, timeout));
2332}
2333
2334/*
2335 * Implement the event wait interface for the eventpoll file. It is the kernel
2336 * part of the user space epoll_pwait(2).
2337 */
2338static int do_epoll_pwait(int epfd, struct epoll_event __user *events,
2339                          int maxevents, struct timespec64 *to,
2340                          const sigset_t __user *sigmask, size_t sigsetsize)
2341{
2342        int error;
2343
2344        /*
2345         * If the caller wants a certain signal mask to be set during the wait,
2346         * we apply it here.
2347         */
2348        error = set_user_sigmask(sigmask, sigsetsize);
2349        if (error)
2350                return error;
2351
2352        error = do_epoll_wait(epfd, events, maxevents, to);
2353
2354        restore_saved_sigmask_unless(error == -EINTR);
2355
2356        return error;
2357}
2358
2359SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
2360                int, maxevents, int, timeout, const sigset_t __user *, sigmask,
2361                size_t, sigsetsize)
2362{
2363        struct timespec64 to;
2364
2365        return do_epoll_pwait(epfd, events, maxevents,
2366                              ep_timeout_to_timespec(&to, timeout),
2367                              sigmask, sigsetsize);
2368}
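
/*
 * Illustrative userspace sketch (not kernel code): the atomic sigmask
 * handling above closes the classic race between unblocking a signal
 * and entering the wait -- the mask is only in effect while the wait
 * itself is in progress. "epfd" and "evs" are hypothetical.
 *
 *	sigset_t mask;
 *
 *	sigemptyset(&mask);	// unblock everything during the wait
 *	int n = epoll_pwait(epfd, evs, 64, -1, &mask);
 *	if (n < 0 && errno == EINTR)
 *		;		// a signal arrived during the wait
 */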
2369
2370SYSCALL_DEFINE6(epoll_pwait2, int, epfd, struct epoll_event __user *, events,
2371                int, maxevents, const struct __kernel_timespec __user *, timeout,
2372                const sigset_t __user *, sigmask, size_t, sigsetsize)
2373{
2374        struct timespec64 ts, *to = NULL;
2375
2376        if (timeout) {
2377                if (get_timespec64(&ts, timeout))
2378                        return -EFAULT;
2379                to = &ts;
2380                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
2381                        return -EINVAL;
2382        }
2383
2384        return do_epoll_pwait(epfd, events, maxevents, to,
2385                              sigmask, sigsetsize);
2386}
2387
2388#ifdef CONFIG_COMPAT
2389static int do_compat_epoll_pwait(int epfd, struct epoll_event __user *events,
2390                                 int maxevents, struct timespec64 *timeout,
2391                                 const compat_sigset_t __user *sigmask,
2392                                 compat_size_t sigsetsize)
2393{
2394        long err;
2395
2396        /*
2397         * If the caller wants a certain signal mask to be set during the wait,
2398         * we apply it here.
2399         */
2400        err = set_compat_user_sigmask(sigmask, sigsetsize);
2401        if (err)
2402                return err;
2403
2404        err = do_epoll_wait(epfd, events, maxevents, timeout);
2405
2406        restore_saved_sigmask_unless(err == -EINTR);
2407
2408        return err;
2409}
2410
2411COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
2412                       struct epoll_event __user *, events,
2413                       int, maxevents, int, timeout,
2414                       const compat_sigset_t __user *, sigmask,
2415                       compat_size_t, sigsetsize)
2416{
2417        struct timespec64 to;
2418
2419        return do_compat_epoll_pwait(epfd, events, maxevents,
2420                                     ep_timeout_to_timespec(&to, timeout),
2421                                     sigmask, sigsetsize);
2422}
2423
2424COMPAT_SYSCALL_DEFINE6(epoll_pwait2, int, epfd,
2425                       struct epoll_event __user *, events,
2426                       int, maxevents,
2427                       const struct __kernel_timespec __user *, timeout,
2428                       const compat_sigset_t __user *, sigmask,
2429                       compat_size_t, sigsetsize)
2430{
2431        struct timespec64 ts, *to = NULL;
2432
2433        if (timeout) {
2434                if (get_timespec64(&ts, timeout))
2435                        return -EFAULT;
2436                to = &ts;
2437                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
2438                        return -EINVAL;
2439        }
2440
2441        return do_compat_epoll_pwait(epfd, events, maxevents, to,
2442                                     sigmask, sigsetsize);
2443}
2444
2445#endif
2446
2447static int __init eventpoll_init(void)
2448{
2449        struct sysinfo si;
2450
2451        si_meminfo(&si);
2452        /*
2453         * Allows the top 4% of lowmem to be allocated for epoll watches (per user).
2454         */
2455        max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
2456                EP_ITEM_COST;
2457        BUG_ON(max_user_watches < 0);
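
        /*
         * Worked example (numbers assumed for illustration): with 1 GiB
         * of lowmem and 4 KiB pages, (262144 / 25) << PAGE_SHIFT is
         * roughly 41 MiB of allowance; divided by EP_ITEM_COST (the
         * per-watch cost of an epitem plus an eppoll_entry, on the
         * order of a hundred-odd bytes), that caps each user at a few
         * hundred thousand watches.
         */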
2458
2459        /*
2460         * We can have many thousands of epitems, so prevent this from
2461         * using an extra cache line on 64-bit (and smaller) CPUs
2462         */
2463        BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
2464
2465        /* Allocates slab cache used to allocate "struct epitem" items */
2466        epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
2467                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
2468
2469        /* Allocates slab cache used to allocate "struct eppoll_entry" */
2470        pwq_cache = kmem_cache_create("eventpoll_pwq",
2471                sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
2472        epoll_sysctls_init();
2473
2474        ephead_cache = kmem_cache_create("ep_head",
2475                sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
2476
2477        return 0;
2478}
2479fs_initcall(eventpoll_init);
2480