linux/fs/aio.c
/*
 *      An async IO implementation for Linux
 *      Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *      Implements an efficient asynchronous io interface.
 *
 *      Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *      See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#if DEBUG > 1
#define dprintk         printk
#else
#define dprintk(x...)   do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;           /* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache        *kiocb_cachep;
static struct kmem_cache        *kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *      Creates the slab caches used by the aio routines, panic on
 *      failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
        kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
        kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

        aio_wq = alloc_workqueue("aio", 0, 1);  /* used to limit concurrency */
        BUG_ON(!aio_wq);

        pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

        return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
        struct aio_ring_info *info = &ctx->ring_info;
        long i;

        for (i = 0; i < info->nr_pages; i++)
                put_page(info->ring_pages[i]);

        if (info->mmap_size) {
                BUG_ON(ctx->mm != current->mm);
                vm_munmap(info->mmap_base, info->mmap_size);
        }

        if (info->ring_pages && info->ring_pages != info->internal_pages)
                kfree(info->ring_pages);
        info->ring_pages = NULL;
        info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
        struct aio_ring *ring;
        struct aio_ring_info *info = &ctx->ring_info;
        unsigned nr_events = ctx->max_reqs;
        unsigned long size;
        int nr_pages;

        /* Compensate for the ring buffer's head/tail overlap entry */
        nr_events += 2; /* 1 is required, 2 for good luck */

        size = sizeof(struct aio_ring);
        size += sizeof(struct io_event) * nr_events;
        nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

        if (nr_pages < 0)
                return -EINVAL;

        nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

        info->nr = 0;
        info->ring_pages = info->internal_pages;
        if (nr_pages > AIO_RING_PAGES) {
                info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
                if (!info->ring_pages)
                        return -ENOMEM;
        }

        info->mmap_size = nr_pages * PAGE_SIZE;
        dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
        down_write(&ctx->mm->mmap_sem);
        info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANONYMOUS|MAP_PRIVATE, 0);
        if (IS_ERR((void *)info->mmap_base)) {
                up_write(&ctx->mm->mmap_sem);
                info->mmap_size = 0;
                aio_free_ring(ctx);
                return -EAGAIN;
        }

        dprintk("mmap address: 0x%08lx\n", info->mmap_base);
        info->nr_pages = get_user_pages(current, ctx->mm,
                                        info->mmap_base, nr_pages,
                                        1, 0, info->ring_pages, NULL);
        up_write(&ctx->mm->mmap_sem);

        if (unlikely(info->nr_pages != nr_pages)) {
                aio_free_ring(ctx);
                return -EAGAIN;
        }

        ctx->user_id = info->mmap_base;

        info->nr = nr_events;           /* trusted copy */

        ring = kmap_atomic(info->ring_pages[0]);
        ring->nr = nr_events;   /* user copy */
        ring->id = ctx->user_id;
        ring->head = ring->tail = 0;
        ring->magic = AIO_RING_MAGIC;
        ring->compat_features = AIO_RING_COMPAT_FEATURES;
        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
        ring->header_length = sizeof(struct aio_ring);
        kunmap_atomic(ring);

        return 0;
}

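/*
 * Worked example of the sizing logic above (editor's sketch; assumes a
 * 4096-byte PAGE_SIZE with a 32-byte struct aio_ring header and 32-byte
 * struct io_event entries, which is the usual layout):
 *
 *      io_setup(128, &ctx) gives max_reqs = 128, so nr_events = 130.
 *      size     = 32 + 32 * 130 = 4192 bytes
 *      nr_pages = DIV_ROUND_UP(4192, 4096) = 2
 *      nr_events is then recomputed from the full page span:
 *                 (2 * 4096 - 32) / 32 = 255 usable event slots.
 */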

/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic().  Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE     (PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE   ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET       (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr) ({                                     \
        unsigned pos = (nr) + AIO_EVENTS_OFFSET;                        \
        struct io_event *__event;                                       \
        __event = kmap_atomic(                                          \
                        (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
        __event += pos % AIO_EVENTS_PER_PAGE;                           \
        __event;                                                        \
})

#define put_aio_ring_event(event) do {          \
        struct io_event *__event = (event);     \
        (void)__event;                          \
        kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
} while (0)

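/*
 * Index arithmetic example for the macros above (editor's sketch, same
 * layout assumptions as the sizing example): AIO_EVENTS_PER_PAGE = 128
 * and AIO_EVENTS_FIRST_PAGE = 127, so AIO_EVENTS_OFFSET = 1.  Adding
 * the offset skips the slot that the aio_ring header occupies on the
 * first page:
 *
 *      event nr 0   -> pos 1   -> page 0, slot 1 (right after the header)
 *      event nr 126 -> pos 127 -> page 0, slot 127 (last slot on page 0)
 *      event nr 127 -> pos 128 -> page 1, slot 0
 */
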
static void ctx_rcu_free(struct rcu_head *head)
{
        struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
        kmem_cache_free(kioctx_cachep, ctx);
}

/* __put_ioctx
 *      Called when the last user of an aio context has gone away,
 *      and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
        unsigned nr_events = ctx->max_reqs;
        BUG_ON(ctx->reqs_active);

        cancel_delayed_work_sync(&ctx->wq);
        aio_free_ring(ctx);
        mmdrop(ctx->mm);
        ctx->mm = NULL;
        if (nr_events) {
                spin_lock(&aio_nr_lock);
                BUG_ON(aio_nr - nr_events > aio_nr);
                aio_nr -= nr_events;
                spin_unlock(&aio_nr_lock);
        }
        pr_debug("__put_ioctx: freeing %p\n", ctx);
        call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

static inline int try_get_ioctx(struct kioctx *kioctx)
{
        return atomic_inc_not_zero(&kioctx->users);
}

static inline void put_ioctx(struct kioctx *kioctx)
{
        BUG_ON(atomic_read(&kioctx->users) <= 0);
        if (unlikely(atomic_dec_and_test(&kioctx->users)))
                __put_ioctx(kioctx);
}

/* ioctx_alloc
 *      Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
        struct mm_struct *mm;
        struct kioctx *ctx;
        int err = -ENOMEM;

        /* Prevent overflows */
        if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
            (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
                pr_debug("ENOMEM: nr_events too high\n");
                return ERR_PTR(-EINVAL);
        }

        if (!nr_events || (unsigned long)nr_events > aio_max_nr)
                return ERR_PTR(-EAGAIN);

        ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        ctx->max_reqs = nr_events;
        mm = ctx->mm = current->mm;
        atomic_inc(&mm->mm_count);

        atomic_set(&ctx->users, 2);
        spin_lock_init(&ctx->ctx_lock);
        spin_lock_init(&ctx->ring_info.ring_lock);
        init_waitqueue_head(&ctx->wait);

        INIT_LIST_HEAD(&ctx->active_reqs);
        INIT_LIST_HEAD(&ctx->run_list);
        INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

        if (aio_setup_ring(ctx) < 0)
                goto out_freectx;

        /* limit the number of system wide aios */
        spin_lock(&aio_nr_lock);
        if (aio_nr + nr_events > aio_max_nr ||
            aio_nr + nr_events < aio_nr) {
                spin_unlock(&aio_nr_lock);
                goto out_cleanup;
        }
        aio_nr += ctx->max_reqs;
        spin_unlock(&aio_nr_lock);

        /* now link into global list. */
        spin_lock(&mm->ioctx_lock);
        hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
        spin_unlock(&mm->ioctx_lock);

        dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
                ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
        return ctx;

out_cleanup:
        err = -EAGAIN;
        aio_free_ring(ctx);
out_freectx:
        mmdrop(mm);
        kmem_cache_free(kioctx_cachep, ctx);
        dprintk("aio: error allocating ioctx %d\n", err);
        return ERR_PTR(err);
}

/* kill_ctx
 *      Cancels all outstanding aio requests on an aio context.  Used
 *      when the processes owning a context have all exited to encourage
 *      the rapid destruction of the kioctx.
 */
static void kill_ctx(struct kioctx *ctx)
{
        int (*cancel)(struct kiocb *, struct io_event *);
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        struct io_event res;

        spin_lock_irq(&ctx->ctx_lock);
        ctx->dead = 1;
        while (!list_empty(&ctx->active_reqs)) {
                struct list_head *pos = ctx->active_reqs.next;
                struct kiocb *iocb = list_kiocb(pos);
                list_del_init(&iocb->ki_list);
                cancel = iocb->ki_cancel;
                kiocbSetCancelled(iocb);
                if (cancel) {
                        iocb->ki_users++;
                        spin_unlock_irq(&ctx->ctx_lock);
                        cancel(iocb, &res);
                        spin_lock_irq(&ctx->ctx_lock);
                }
        }

        if (!ctx->reqs_active)
                goto out;

        add_wait_queue(&ctx->wait, &wait);
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        while (ctx->reqs_active) {
                spin_unlock_irq(&ctx->ctx_lock);
                io_schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                spin_lock_irq(&ctx->ctx_lock);
        }
        __set_task_state(tsk, TASK_RUNNING);
        remove_wait_queue(&ctx->wait, &wait);

out:
        spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 *      Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
        while (iocb->ki_users) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!iocb->ki_users)
                        break;
                io_schedule();
        }
        __set_current_state(TASK_RUNNING);
        return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

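/*
 * Typical caller pattern for the sync-kiocb path (editor's sketch,
 * modelled on do_sync_read() from the same era; not part of this file):
 *
 *      struct kiocb kiocb;
 *      ssize_t ret;
 *
 *      init_sync_kiocb(&kiocb, filp);
 *      kiocb.ki_pos = *ppos;
 *      kiocb.ki_left = len;
 *      kiocb.ki_nbytes = len;
 *
 *      ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 *      if (ret == -EIOCBQUEUED)
 *              ret = wait_on_sync_kiocb(&kiocb);
 *      *ppos = kiocb.ki_pos;
 */
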
/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
        struct kioctx *ctx;

        while (!hlist_empty(&mm->ioctx_list)) {
                ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
                hlist_del_rcu(&ctx->list);

                kill_ctx(ctx);

                if (1 != atomic_read(&ctx->users))
                        printk(KERN_DEBUG
                                "exit_aio:ioctx still alive: %d %d %d\n",
                                atomic_read(&ctx->users), ctx->dead,
                                ctx->reqs_active);
                /*
                 * We don't need to bother with munmap() here -
                 * exit_mmap(mm) is coming and it'll unmap everything.
                 * Since aio_free_ring() uses non-zero ->mmap_size
                 * as indicator that it needs to unmap the area,
                 * just set it to 0; aio_free_ring() is the only
                 * place that uses ->mmap_size, so it's safe.
                 * That way we get all munmap done to current->mm -
                 * all other callers have ctx->mm == current->mm.
                 */
                ctx->ring_info.mmap_size = 0;
                put_ioctx(ctx);
        }
}

/* aio_get_req
 *      Allocate a slot for an aio request.  Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete.  Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2.  The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
        struct kiocb *req = NULL;

        req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
        if (unlikely(!req))
                return NULL;

        req->ki_flags = 0;
        req->ki_users = 2;
        req->ki_key = 0;
        req->ki_ctx = ctx;
        req->ki_cancel = NULL;
        req->ki_retry = NULL;
        req->ki_dtor = NULL;
        req->private = NULL;
        req->ki_iovec = NULL;
        INIT_LIST_HEAD(&req->ki_run_list);
        req->ki_eventfd = NULL;

        return req;
}

/*
 * struct kiocb's are allocated in batches to reduce the number of
 * times the ctx lock is acquired and released.
 */
#define KIOCB_BATCH_SIZE        32L
struct kiocb_batch {
        struct list_head head;
        long count; /* number of requests left to allocate */
};

static void kiocb_batch_init(struct kiocb_batch *batch, long total)
{
        INIT_LIST_HEAD(&batch->head);
        batch->count = total;
}

static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
{
        struct kiocb *req, *n;

        if (list_empty(&batch->head))
                return;

        spin_lock_irq(&ctx->ctx_lock);
        list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
                list_del(&req->ki_batch);
                list_del(&req->ki_list);
                kmem_cache_free(kiocb_cachep, req);
                ctx->reqs_active--;
        }
        if (unlikely(!ctx->reqs_active && ctx->dead))
                wake_up_all(&ctx->wait);
        spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * Allocate a batch of kiocbs.  This avoids taking and dropping the
 * context lock a lot during setup.
 */
static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
{
        unsigned short allocated, to_alloc;
        long avail;
        bool called_fput = false;
        struct kiocb *req, *n;
        struct aio_ring *ring;

        to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
        for (allocated = 0; allocated < to_alloc; allocated++) {
                req = __aio_get_req(ctx);
                if (!req)
                        /* allocation failed, go with what we've got */
                        break;
                list_add(&req->ki_batch, &batch->head);
        }

        if (allocated == 0)
                goto out;

retry:
        spin_lock_irq(&ctx->ctx_lock);
        ring = kmap_atomic(ctx->ring_info.ring_pages[0]);

        avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
        BUG_ON(avail < 0);
        if (avail == 0 && !called_fput) {
                /*
                 * Handle a potential starvation case.  It is possible that
                 * we hold the last reference on a struct file, causing us
                 * to delay the final fput to non-irq context.  In this case,
                 * ctx->reqs_active is artificially high.  Calling the fput
                 * routine here may free up a slot in the event completion
                 * ring, allowing this allocation to succeed.
                 */
                kunmap_atomic(ring);
                spin_unlock_irq(&ctx->ctx_lock);
                aio_fput_routine(NULL);
                called_fput = true;
                goto retry;
        }

        if (avail < allocated) {
                /* Trim back the number of requests. */
                list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
                        list_del(&req->ki_batch);
                        kmem_cache_free(kiocb_cachep, req);
                        if (--allocated <= avail)
                                break;
                }
        }

        batch->count -= allocated;
        list_for_each_entry(req, &batch->head, ki_batch) {
                list_add(&req->ki_list, &ctx->active_reqs);
                ctx->reqs_active++;
        }

        kunmap_atomic(ring);
        spin_unlock_irq(&ctx->ctx_lock);

out:
        return allocated;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx,
                                        struct kiocb_batch *batch)
{
        struct kiocb *req;

        if (list_empty(&batch->head))
                if (kiocb_batch_refill(ctx, batch) == 0)
                        return NULL;
        req = list_first_entry(&batch->head, struct kiocb, ki_batch);
        list_del(&req->ki_batch);
        return req;
}

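/*
 * How the submit path drives the batch API (editor's sketch, modelled
 * on do_io_submit(), which hands the batch down to io_submit_one();
 * not part of this file):
 *
 *      struct kiocb_batch batch;
 *      long i;
 *
 *      kiocb_batch_init(&batch, nr);
 *      for (i = 0; i < nr; i++) {
 *              struct kiocb *req = aio_get_req(ctx, &batch);
 *              if (unlikely(!req)) {
 *                      ret = -EAGAIN;
 *                      break;
 *              }
 *              ... set up and submit req ...
 *      }
 *      kiocb_batch_free(ctx, &batch);
 */
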
static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
        assert_spin_locked(&ctx->ctx_lock);

        if (req->ki_eventfd != NULL)
                eventfd_ctx_put(req->ki_eventfd);
        if (req->ki_dtor)
                req->ki_dtor(req);
        if (req->ki_iovec != &req->ki_inline_vec)
                kfree(req->ki_iovec);
        kmem_cache_free(kiocb_cachep, req);
        ctx->reqs_active--;

        if (unlikely(!ctx->reqs_active && ctx->dead))
                wake_up_all(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
        spin_lock_irq(&fput_lock);
        while (likely(!list_empty(&fput_head))) {
                struct kiocb *req = list_kiocb(fput_head.next);
                struct kioctx *ctx = req->ki_ctx;

                list_del(&req->ki_list);
                spin_unlock_irq(&fput_lock);

                /* Complete the fput(s) */
                if (req->ki_filp != NULL)
                        fput(req->ki_filp);

                /* Link the iocb into the context's free list */
                rcu_read_lock();
                spin_lock_irq(&ctx->ctx_lock);
                really_put_req(ctx, req);
                /*
                 * at that point ctx might've been killed, but actual
                 * freeing is RCU'd
                 */
                spin_unlock_irq(&ctx->ctx_lock);
                rcu_read_unlock();

                spin_lock_irq(&fput_lock);
        }
        spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *      Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
        dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
                req, atomic_long_read(&req->ki_filp->f_count));

        assert_spin_locked(&ctx->ctx_lock);

        req->ki_users--;
        BUG_ON(req->ki_users < 0);
        if (likely(req->ki_users))
                return 0;
        list_del(&req->ki_list);                /* remove from active_reqs */
        req->ki_cancel = NULL;
        req->ki_retry = NULL;

        /*
         * Try to optimize the aio and eventfd file* puts, by avoiding the
         * need to schedule work when it is not final fput() time. In normal
         * cases, we would not be holding the last reference to the file*,
         * so this function will be executed w/out any aio kthread wakeup.
         */
        if (unlikely(!fput_atomic(req->ki_filp))) {
                spin_lock(&fput_lock);
                list_add(&req->ki_list, &fput_head);
                spin_unlock(&fput_lock);
                schedule_work(&fput_work);
        } else {
                req->ki_filp = NULL;
                really_put_req(ctx, req);
        }
        return 1;
}

/* aio_put_req
 *      Returns true if this put was the last user of the kiocb,
 *      false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
        struct kioctx *ctx = req->ki_ctx;
        int ret;
        spin_lock_irq(&ctx->ctx_lock);
        ret = __aio_put_req(ctx, req);
        spin_unlock_irq(&ctx->ctx_lock);
        return ret;
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
        struct mm_struct *mm = current->mm;
        struct kioctx *ctx, *ret = NULL;
        struct hlist_node *n;

        rcu_read_lock();

        hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
                /*
                 * RCU protects us against accessing freed memory but
                 * we have to be careful not to get a reference when the
                 * reference count already dropped to 0 (ctx->dead test
                 * is unreliable because of races).
                 */
                if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)) {
                        ret = ctx;
                        break;
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
        struct kioctx *ctx = iocb->ki_ctx;

        assert_spin_locked(&ctx->ctx_lock);

        if (list_empty(&iocb->ki_run_list)) {
                list_add_tail(&iocb->ki_run_list, &ctx->run_list);
                return 1;
        }
        return 0;
}

/* aio_run_iocb
 *      This is the core aio execution routine. It is
 *      invoked both for initial i/o submission and
 *      subsequent retries via the aio_kick_handler.
 *      Expects to be invoked with iocb->ki_ctx->ctx_lock
 *      already held. The lock is released and reacquired
 *      as needed during processing.
 *
 * Calls the iocb retry method (already set up for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of the common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
        struct kioctx   *ctx = iocb->ki_ctx;
        ssize_t (*retry)(struct kiocb *);
        ssize_t ret;

        if (!(retry = iocb->ki_retry)) {
                printk("aio_run_iocb: iocb->ki_retry = NULL\n");
                return 0;
        }

        /*
         * We don't want the next retry iteration for this
         * operation to start until this one has returned and
         * updated the iocb state. However, wait_queue functions
         * can trigger a kick_iocb from interrupt context in the
         * meantime, indicating that data is available for the next
         * iteration. We want to remember that and enable the
         * next retry iteration _after_ we are through with
         * this one.
         *
         * So, in order to be able to register a "kick", but
         * prevent it from being queued now, we clear the kick
         * flag, but make the kick code *think* that the iocb is
         * still on the run list until we are actually done.
         * When we are done with this iteration, we check if
         * the iocb was kicked in the meantime and if so, queue
         * it up afresh.
         */

        kiocbClearKicked(iocb);

        /*
         * This is so that aio_complete knows it doesn't need to
         * pull the iocb off the run list (We can't just call
         * INIT_LIST_HEAD because we don't want a kick_iocb to
         * queue this on the run list yet)
         */
        iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
        spin_unlock_irq(&ctx->ctx_lock);

        /* Quit retrying if the i/o has been cancelled */
        if (kiocbIsCancelled(iocb)) {
                ret = -EINTR;
                aio_complete(iocb, ret, 0);
                /* must not access the iocb after this */
                goto out;
        }

        /*
         * Now we are all set to call the retry method in async
         * context.
         */
        ret = retry(iocb);

        if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
                /*
                 * There's no easy way to restart the syscall since other AIO's
                 * may be already running. Just fail this IO with EINTR.
                 */
                if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
                             ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
                        ret = -EINTR;
                aio_complete(iocb, ret, 0);
        }
out:
        spin_lock_irq(&ctx->ctx_lock);

        if (-EIOCBRETRY == ret) {
                /*
                 * OK, now that we are done with this iteration
                 * and know that there is more left to go,
                 * this is where we let go so that a subsequent
                 * "kick" can start the next iteration
                 */

                /* will make __queue_kicked_iocb succeed from here on */
                INIT_LIST_HEAD(&iocb->ki_run_list);
                /* we must queue the next iteration ourselves, if it
                 * has already been kicked */
                if (kiocbIsKicked(iocb)) {
                        __queue_kicked_iocb(iocb);

                        /*
                         * __queue_kicked_iocb will always return 1 here, because
                         * iocb->ki_run_list is empty at this point so it should
                         * be safe to unconditionally queue the context into the
                         * work queue.
                         */
                        aio_queue_work(ctx);
                }
        }
        return ret;
}

/*
 * __aio_run_iocbs:
 *      Process all pending retries queued on the ioctx
 *      run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
        struct kiocb *iocb;
        struct list_head run_list;

        assert_spin_locked(&ctx->ctx_lock);

        list_replace_init(&ctx->run_list, &run_list);
        while (!list_empty(&run_list)) {
                iocb = list_entry(run_list.next, struct kiocb, ki_run_list);
                list_del(&iocb->ki_run_list);
                /*
                 * Hold an extra reference while retrying i/o.
                 */
                iocb->ki_users++;       /* grab extra reference */
                aio_run_iocb(iocb);
                __aio_put_req(ctx, iocb);
        }
        if (!list_empty(&ctx->run_list))
                return 1;
        return 0;
}

static void aio_queue_work(struct kioctx *ctx)
{
        unsigned long timeout;
        /*
         * if someone is waiting, get the work started right
         * away, otherwise, use a longer delay
         */
        smp_mb();
        if (waitqueue_active(&ctx->wait))
                timeout = 1;
        else
                timeout = HZ/10;
        queue_delayed_work(aio_wq, &ctx->wq, timeout);
}

/*
 * aio_run_all_iocbs:
 *      Process all pending retries queued on the ioctx
 *      run list, and keep running them until the list
 *      stays empty.
 * Assumes it is operating within the aio issuer's mm context.
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
        spin_lock_irq(&ctx->ctx_lock);
        while (__aio_run_iocbs(ctx))
                ;
        spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 *      Work queue handler triggered to process pending
 *      retries on an ioctx. Takes on the aio issuer's
 *      mm context before running the iocbs, so that
 *      copy_xxx_user operates on the issuer's address
 *      space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
        struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
        mm_segment_t oldfs = get_fs();
        struct mm_struct *mm;
        int requeue;

        set_fs(USER_DS);
        use_mm(ctx->mm);
        spin_lock_irq(&ctx->ctx_lock);
        requeue = __aio_run_iocbs(ctx);
        mm = ctx->mm;
        spin_unlock_irq(&ctx->ctx_lock);
        unuse_mm(mm);
        set_fs(oldfs);
        /*
         * we're in a worker thread already; no point using non-zero delay
         */
        if (requeue)
                queue_delayed_work(aio_wq, &ctx->wq, 0);
}

/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
        struct kioctx   *ctx = iocb->ki_ctx;
        unsigned long flags;
        int run = 0;

        spin_lock_irqsave(&ctx->ctx_lock, flags);
        /* set this inside the lock so that we can't race with aio_run_iocb()
         * testing it and putting the iocb on the run list under the lock */
        if (!kiocbTryKick(iocb))
                run = __queue_kicked_iocb(iocb);
        spin_unlock_irqrestore(&ctx->ctx_lock, flags);
        if (run)
                aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *      Called typically from a wait queue callback context
 *      to trigger a retry of the iocb.
 *      The retry is usually executed by aio workqueue
 *      threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
        /* sync iocbs are easy: they can only ever be executing from a
         * single context. */
        if (is_sync_kiocb(iocb)) {
                kiocbSetKicked(iocb);
                wake_up_process(iocb->ki_obj.tsk);
                return;
        }

        try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

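/*
 * A retry-based operation would typically hook kick_iocb() up to a wait
 * queue, roughly like this (editor's sketch using the wait-queue
 * callback signature of this era; struct my_state and my_waiter are
 * illustrative names only):
 *
 *      struct my_state {
 *              wait_queue_t    wait;
 *              struct kiocb    *iocb;
 *      };
 *
 *      static int my_waiter(wait_queue_t *wait, unsigned mode,
 *                           int sync, void *key)
 *      {
 *              struct my_state *st = container_of(wait, struct my_state, wait);
 *
 *              kick_iocb(st->iocb);
 *              return 1;
 *      }
 *
 * The matching ki_retry method returns -EIOCBRETRY while work is still
 * outstanding; the kick then re-queues it via the ctx run list.
 */
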
/* aio_complete
 *      Called when the io request on the given iocb is complete.
 *      Returns true if this is the last user of the request.  The
 *      only other user of the request can be the cancellation code.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
        struct kioctx   *ctx = iocb->ki_ctx;
        struct aio_ring_info    *info;
        struct aio_ring *ring;
        struct io_event *event;
        unsigned long   flags;
        unsigned long   tail;
        int             ret;

        /*
         * Special case handling for sync iocbs:
         *  - events go directly into the iocb for fast handling
         *  - the sync task with the iocb in its stack holds the single iocb
         *    ref, no other paths have a way to get another ref
         *  - the sync task helpfully left a reference to itself in the iocb
         */
        if (is_sync_kiocb(iocb)) {
                BUG_ON(iocb->ki_users != 1);
                iocb->ki_user_data = res;
                iocb->ki_users = 0;
                wake_up_process(iocb->ki_obj.tsk);
                return 1;
        }

        info = &ctx->ring_info;

        /* add a completion event to the ring buffer.
         * must be done holding ctx->ctx_lock to prevent
         * other code from messing with the tail
         * pointer since we might be called from irq
         * context.
         */
        spin_lock_irqsave(&ctx->ctx_lock, flags);

        if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
                list_del_init(&iocb->ki_run_list);

        /*
         * cancelled requests don't get events, userland was given one
         * when the event got cancelled.
         */
        if (kiocbIsCancelled(iocb))
                goto put_rq;

        ring = kmap_atomic(info->ring_pages[0]);

        tail = info->tail;
        event = aio_ring_event(info, tail);
        if (++tail >= info->nr)
                tail = 0;

        event->obj = (u64)(unsigned long)iocb->ki_obj.user;
        event->data = iocb->ki_user_data;
        event->res = res;
        event->res2 = res2;

        dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
                ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
                res, res2);

        /* after flagging the request as done, we
         * must never even look at it again
         */
        smp_wmb();      /* make event visible before updating tail */

        info->tail = tail;
        ring->tail = tail;

        put_aio_ring_event(event);
        kunmap_atomic(ring);

        pr_debug("added to ring %p at [%lu]\n", iocb, tail);

        /*
         * Check if the user asked us to deliver the result through an
         * eventfd. The eventfd_signal() function is safe to be called
         * from IRQ context.
         */
        if (iocb->ki_eventfd != NULL)
                eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
        /* everything turned out well, dispose of the aiocb. */
        ret = __aio_put_req(ctx, iocb);

        /*
         * We have to order our ring_info tail store above and test
         * of the wait list below outside the wait lock.  This is
         * like in wake_up_bit() where clearing a bit has to be
         * ordered with the unlocked test.
         */
        smp_mb();

        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);

        spin_unlock_irqrestore(&ctx->ctx_lock, flags);
        return ret;
}
EXPORT_SYMBOL(aio_complete);

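/*
 * Completion-side usage (editor's sketch): a fully asynchronous
 * ki_retry method returns -EIOCBQUEUED once the i/o is in flight, and
 * the driver's completion path later reports the result exactly once,
 * e.g. from its interrupt handler (my_request and my_irq_done are
 * illustrative names only):
 *
 *      static void my_irq_done(struct my_request *rq)
 *      {
 *              aio_complete(rq->iocb, rq->transferred, 0);
 *      }
 */
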
/* aio_read_evt
 *      Pull an event off of the ioctx's event ring.  Returns the number of
 *      events fetched (0 or 1 ;-)
 *      FIXME: make this use cmpxchg.
 *      TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
        struct aio_ring_info *info = &ioctx->ring_info;
        struct aio_ring *ring;
        unsigned long head;
        int ret = 0;

        ring = kmap_atomic(info->ring_pages[0]);
        dprintk("in aio_read_evt h%lu t%lu m%lu\n",
                 (unsigned long)ring->head, (unsigned long)ring->tail,
                 (unsigned long)ring->nr);

        if (ring->head == ring->tail)
                goto out;

        spin_lock(&info->ring_lock);

        head = ring->head % info->nr;
        if (head != ring->tail) {
                struct io_event *evp = aio_ring_event(info, head);
                *ent = *evp;
                head = (head + 1) % info->nr;
                smp_mb(); /* finish reading the event before updating the head */
                ring->head = head;
                ret = 1;
                put_aio_ring_event(evp);
        }
        spin_unlock(&info->ring_lock);

out:
        dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
                 (unsigned long)ring->head, (unsigned long)ring->tail);
        kunmap_atomic(ring);
        return ret;
}

struct aio_timeout {
        struct timer_list       timer;
        int                     timed_out;
        struct task_struct      *p;
};

static void timeout_func(unsigned long data)
{
        struct aio_timeout *to = (struct aio_timeout *)data;

        to->timed_out = 1;
        wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
        setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
        to->timed_out = 0;
        to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
                               const struct timespec *ts)
{
        to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
        if (time_after(to->timer.expires, jiffies))
                add_timer(&to->timer);
        else
                to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
        del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
                        long min_nr, long nr,
                        struct io_event __user *event,
                        struct timespec __user *timeout)
{
        long                    start_jiffies = jiffies;
        struct task_struct      *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        int                     ret;
        int                     i = 0;
        struct io_event         ent;
        struct aio_timeout      to;
        int                     retry = 0;

        /* needed to zero any padding within an entry (there shouldn't be
         * any, but C is fun!)
         */
        memset(&ent, 0, sizeof(ent));
retry:
        ret = 0;
        while (likely(i < nr)) {
                ret = aio_read_evt(ctx, &ent);
                if (unlikely(ret <= 0))
                        break;

                dprintk("read event: %Lx %Lx %Lx %Lx\n",
                        ent.data, ent.obj, ent.res, ent.res2);

                /* Could we split the check in two? */
                ret = -EFAULT;
                if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
                        dprintk("aio: lost an event due to EFAULT.\n");
                        break;
                }
                ret = 0;

                /* Good, event copied to userland, update counts. */
                event++;
                i++;
        }

        if (min_nr <= i)
                return i;
        if (ret)
                return ret;

        /* End fast path */

        /* racy check, but it gets redone */
        if (!retry && unlikely(!list_empty(&ctx->run_list))) {
                retry = 1;
                aio_run_all_iocbs(ctx);
                goto retry;
        }

        init_timeout(&to);
        if (timeout) {
                struct timespec ts;
                ret = -EFAULT;
                if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
                        goto out;

                set_timeout(start_jiffies, &to, &ts);
        }

        while (likely(i < nr)) {
                add_wait_queue_exclusive(&ctx->wait, &wait);
                do {
                        set_task_state(tsk, TASK_INTERRUPTIBLE);
                        ret = aio_read_evt(ctx, &ent);
                        if (ret)
                                break;
                        if (min_nr <= i)
                                break;
                        if (unlikely(ctx->dead)) {
                                ret = -EINVAL;
                                break;
                        }
                        if (to.timed_out)       /* Only check after read evt */
                                break;
                        /* Try to only show up in io wait if there are ops
                         *  in flight */
                        if (ctx->reqs_active)
                                io_schedule();
                        else
                                schedule();
                        if (signal_pending(tsk)) {
                                ret = -EINTR;
                                break;
                        }
                } while (1);

                set_task_state(tsk, TASK_RUNNING);
                remove_wait_queue(&ctx->wait, &wait);

                if (unlikely(ret <= 0))
                        break;

                ret = -EFAULT;
                if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
                        dprintk("aio: lost an event due to EFAULT.\n");
                        break;
                }

                /* Good, event copied to userland, update counts. */
                event++;
                i++;
        }

        if (timeout)
                clear_timeout(&to);
out:
        destroy_timer_on_stack(&to.timer);
        return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
        struct mm_struct *mm = current->mm;
        int was_dead;

        /* delete the entry from the list if someone else hasn't already */
        spin_lock(&mm->ioctx_lock);
        was_dead = ioctx->dead;
        ioctx->dead = 1;
        hlist_del_rcu(&ioctx->list);
        spin_unlock(&mm->ioctx_lock);

        dprintk("aio_release(%p)\n", ioctx);
        if (likely(!was_dead))
                put_ioctx(ioctx);       /* twice for the list */

        kill_ctx(ioctx);

        /*
         * Wake up any waiters.  The setting of ctx->dead must be seen
         * by other CPUs at this point.  Right now, we rely on the
         * locking done by the above calls to ensure this consistency.
         */
        wake_up_all(&ioctx->wait);
}

/* sys_io_setup:
 *      Create an aio_context capable of receiving at least nr_events.
 *      ctxp must not point to an aio_context that already exists, and
 *      must be initialized to 0 prior to the call.  On successful
 *      creation of the aio_context, *ctxp is filled in with the resulting
 *      handle.  May fail with -EINVAL if *ctxp is not initialized, or
 *      if the specified nr_events exceeds internal limits.  May fail
 *      with -EAGAIN if the specified nr_events exceeds the user's limit
 *      of available events.  May fail with -ENOMEM if insufficient kernel
 *      resources are available.  May fail with -EFAULT if an invalid
 *      pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *      implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
        struct kioctx *ioctx = NULL;
        unsigned long ctx;
        long ret;

        ret = get_user(ctx, ctxp);
        if (unlikely(ret))
                goto out;

        ret = -EINVAL;
        if (unlikely(ctx || nr_events == 0)) {
                pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
                         ctx, nr_events);
                goto out;
        }

        ioctx = ioctx_alloc(nr_events);
        ret = PTR_ERR(ioctx);
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
                if (ret)
                        io_destroy(ioctx);
                put_ioctx(ioctx);
        }

out:
        return ret;
}

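/*
 * Minimal userspace usage of this syscall family (editor's sketch;
 * invokes the raw syscalls directly rather than going through libaio):
 *
 *      #include <linux/aio_abi.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      aio_context_t ctx = 0;  (must be zeroed, as documented above)
 *
 *      if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *              handle the error;
 *      submit with __NR_io_submit, reap with __NR_io_getevents, then:
 *      syscall(__NR_io_destroy, ctx);
 */
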
/* sys_io_destroy:
 *      Destroy the aio_context specified.  May cancel any outstanding
 *      AIOs and block on completion.  Will fail with -ENOSYS if not
 *      implemented.  May fail with -EINVAL if the context pointed to
 *      is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
        struct kioctx *ioctx = lookup_ioctx(ctx);
        if (likely(NULL != ioctx)) {
                io_destroy(ioctx);
                put_ioctx(ioctx);
                return 0;
        }
        pr_debug("EINVAL: io_destroy: invalid context id\n");
        return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
        struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

        BUG_ON(ret <= 0);

        while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
                ssize_t this = min((ssize_t)iov->iov_len, ret);
                iov->iov_base += this;
                iov->iov_len -= this;
                iocb->ki_left -= this;
                ret -= this;
                if (iov->iov_len == 0) {
                        iocb->ki_cur_seg++;
                        iov++;
                }
        }

        /* the caller should not have done more io than what fit in
         * the remaining iovecs */
        BUG_ON(ret > 0 && iocb->ki_left == 0);
}

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
                         unsigned long, loff_t);
        ssize_t ret = 0;
        unsigned short opcode;

        if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
                (iocb->ki_opcode == IOCB_CMD_PREAD)) {
                rw_op = file->f_op->aio_read;
                opcode = IOCB_CMD_PREADV;
        } else {
                rw_op = file->f_op->aio_write;
                opcode = IOCB_CMD_PWRITEV;
        }

        /* This matches the pread()/pwrite() logic */
        if (iocb->ki_pos < 0)
                return -EINVAL;

        do {
                ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
                            iocb->ki_nr_segs - iocb->ki_cur_seg,
                            iocb->ki_pos);
                if (ret > 0)
                        aio_advance_iovec(iocb, ret);

        /* retry all partial writes.  retry partial reads as long as it's a
         * regular file. */
        } while (ret > 0 && iocb->ki_left > 0 &&
                 (opcode == IOCB_CMD_PWRITEV ||
                  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

        /* This means we must have transferred all that we could */
        /* No need to retry anymore */
        if ((ret == 0) || (iocb->ki_left == 0))
                ret = iocb->ki_nbytes - iocb->ki_left;

        /* If we managed to write some out we return that, rather than
         * the eventual error. */
        if (opcode == IOCB_CMD_PWRITEV
            && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
            && iocb->ki_nbytes - iocb->ki_left)
                ret = iocb->ki_nbytes - iocb->ki_left;

        return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
        struct file *file = iocb->ki_filp;
        ssize_t ret = -EINVAL;

        if (file->f_op->aio_fsync)
                ret = file->f_op->aio_fsync(iocb, 1);
        return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
        struct file *file = iocb->ki_filp;
        ssize_t ret = -EINVAL;

        if (file->f_op->aio_fsync)
                ret = file->f_op->aio_fsync(iocb, 0);
        return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
        ssize_t ret;

#ifdef CONFIG_COMPAT
        if (compat)
                ret = compat_rw_copy_check_uvector(type,
                                (struct compat_iovec __user *)kiocb->ki_buf,
                                kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
                                &kiocb->ki_iovec);
        else
#endif
                ret = rw_copy_check_uvector(type,
                                (struct iovec __user *)kiocb->ki_buf,
                                kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
                                &kiocb->ki_iovec);
        if (ret < 0)
                goto out;

        ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
        if (ret < 0)
                goto out;

        kiocb->ki_nr_segs = kiocb->ki_nbytes;
        kiocb->ki_cur_seg = 0;
        /* ki_nbytes/left now reflect bytes instead of segs */
        kiocb->ki_nbytes = ret;
        kiocb->ki_left = ret;

        ret = 0;
out:
        return ret;
}

static ssize_t aio_setup_single_vector(int type, struct file *file, struct kiocb *kiocb)
{
        int bytes;

        bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
        if (bytes < 0)
                return bytes;

        kiocb->ki_iovec = &kiocb->ki_inline_vec;
        kiocb->ki_iovec->iov_base = kiocb->ki_buf;
        kiocb->ki_iovec->iov_len = bytes;
        kiocb->ki_nr_segs = 1;
        kiocb->ki_cur_seg = 0;
        return 0;
}

1490/*
1491 * aio_setup_iocb:
1492 *      Performs the initial checks and aio retry method
1493 *      setup for the kiocb at the time of io submission.
1494 */
1495static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
1496{
1497        struct file *file = kiocb->ki_filp;
1498        ssize_t ret = 0;
1499
1500        switch (kiocb->ki_opcode) {
1501        case IOCB_CMD_PREAD:
1502                ret = -EBADF;
1503                if (unlikely(!(file->f_mode & FMODE_READ)))
1504                        break;
1505                ret = -EFAULT;
1506                if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
1507                        kiocb->ki_left)))
1508                        break;
1509                ret = aio_setup_single_vector(READ, file, kiocb);
1510                if (ret)
1511                        break;
1512                ret = -EINVAL;
1513                if (file->f_op->aio_read)
1514                        kiocb->ki_retry = aio_rw_vect_retry;
1515                break;
1516        case IOCB_CMD_PWRITE:
1517                ret = -EBADF;
1518                if (unlikely(!(file->f_mode & FMODE_WRITE)))
1519                        break;
1520                ret = -EFAULT;
1521                if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
1522                        kiocb->ki_left)))
1523                        break;
1524                ret = aio_setup_single_vector(WRITE, file, kiocb);
1525                if (ret)
1526                        break;
1527                ret = -EINVAL;
1528                if (file->f_op->aio_write)
1529                        kiocb->ki_retry = aio_rw_vect_retry;
1530                break;
1531        case IOCB_CMD_PREADV:
1532                ret = -EBADF;
1533                if (unlikely(!(file->f_mode & FMODE_READ)))
1534                        break;
1535                ret = aio_setup_vectored_rw(READ, kiocb, compat);
1536                if (ret)
1537                        break;
1538                ret = -EINVAL;
1539                if (file->f_op->aio_read)
1540                        kiocb->ki_retry = aio_rw_vect_retry;
1541                break;
1542        case IOCB_CMD_PWRITEV:
1543                ret = -EBADF;
1544                if (unlikely(!(file->f_mode & FMODE_WRITE)))
1545                        break;
1546                ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
1547                if (ret)
1548                        break;
1549                ret = -EINVAL;
1550                if (file->f_op->aio_write)
1551                        kiocb->ki_retry = aio_rw_vect_retry;
1552                break;
1553        case IOCB_CMD_FDSYNC:
1554                ret = -EINVAL;
1555                if (file->f_op->aio_fsync)
1556                        kiocb->ki_retry = aio_fdsync;
1557                break;
1558        case IOCB_CMD_FSYNC:
1559                ret = -EINVAL;
1560                if (file->f_op->aio_fsync)
1561                        kiocb->ki_retry = aio_fsync;
1562                break;
1563        default:
1564                dprintk("EINVAL: io_submit: no operation provided\n");
1565                ret = -EINVAL;
1566        }
1567
1568        if (!kiocb->ki_retry)
1569                return ret;
1570
1571        return 0;
1572}
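
/*
 * The -EINVAL fall-throughs above mean an opcode is only accepted when the
 * target file supplies the matching aio method in its file_operations.  A
 * minimal sketch of how a filesystem of this kernel generation typically
 * wires those up (the struct name is hypothetical; the helpers are the
 * stock do_sync/generic ones):
 *
 *        static const struct file_operations example_fops = {
 *                .llseek         = generic_file_llseek,
 *                .read           = do_sync_read,
 *                .aio_read       = generic_file_aio_read,
 *                .write          = do_sync_write,
 *                .aio_write      = generic_file_aio_write,
 *        };
 *
 * IOCB_CMD_FSYNC/FDSYNC additionally require an aio_fsync method, which few
 * filesystems provide, so those opcodes usually fail with -EINVAL.
 */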
1573
1574static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1575                         struct iocb *iocb, struct kiocb_batch *batch,
1576                         bool compat)
1577{
1578        struct kiocb *req;
1579        struct file *file;
1580        ssize_t ret;
1581
1582        /* enforce forwards compatibility on users */
1583        if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
1584                pr_debug("EINVAL: io_submit: reserve field set\n");
1585                return -EINVAL;
1586        }
1587
1588        /* prevent overflows */
1589        if (unlikely(
1590            (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
1591            (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
1592            ((ssize_t)iocb->aio_nbytes < 0)
1593           )) {
1594                pr_debug("EINVAL: io_submit: overflow check\n");
1595                return -EINVAL;
1596        }
1597
1598        file = fget(iocb->aio_fildes);
1599        if (unlikely(!file))
1600                return -EBADF;
1601
1602        req = aio_get_req(ctx, batch);  /* returns with 2 references to req */
1603        if (unlikely(!req)) {
1604                fput(file);
1605                return -EAGAIN;
1606        }
1607        req->ki_filp = file;
1608        if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1609                /*
1610                 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1611                 * instance of the file* now. The file descriptor must be
1612                 * an eventfd() fd, and will be signaled for each completed
1613                 * event using the eventfd_signal() function.
1614                 */
1615                req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
1616                if (IS_ERR(req->ki_eventfd)) {
1617                        ret = PTR_ERR(req->ki_eventfd);
1618                        req->ki_eventfd = NULL;
1619                        goto out_put_req;
1620                }
1621        }
1622
1623        ret = put_user(req->ki_key, &user_iocb->aio_key);
1624        if (unlikely(ret)) {
1625                dprintk("EFAULT: aio_key\n");
1626                goto out_put_req;
1627        }
1628
1629        req->ki_obj.user = user_iocb;
1630        req->ki_user_data = iocb->aio_data;
1631        req->ki_pos = iocb->aio_offset;
1632
1633        req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
1634        req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
1635        req->ki_opcode = iocb->aio_lio_opcode;
1636
1637        ret = aio_setup_iocb(req, compat);
1638
1639        if (ret)
1640                goto out_put_req;
1641
1642        spin_lock_irq(&ctx->ctx_lock);
1643        /*
1644         * We could have raced with io_destroy() and are currently holding a
1645         * reference to ctx which should be destroyed. We cannot submit IO
1646         * since ctx gets freed as soon as io_submit() puts its reference.  The
1647         * check here is reliable: io_destroy() sets ctx->dead before waiting
1648         * for outstanding IO and the barrier between these two is realized by
1649         * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
1650         * increment ctx->reqs_active before checking for ctx->dead and the
1651         * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
1652         * don't see ctx->dead set here, io_destroy() waits for our IO to
1653         * finish.
1654         */
1655        if (ctx->dead) {
1656                spin_unlock_irq(&ctx->ctx_lock);
1657                ret = -EINVAL;
1658                goto out_put_req;
1659        }
1660        aio_run_iocb(req);
1661        if (!list_empty(&ctx->run_list)) {
1662                /* drain the run list */
1663                while (__aio_run_iocbs(ctx))
1664                        ;
1665        }
1666        spin_unlock_irq(&ctx->ctx_lock);
1667
1668        aio_put_req(req);       /* drop extra ref to req */
1669        return 0;
1670
1671out_put_req:
1672        aio_put_req(req);       /* drop extra ref to req */
1673        aio_put_req(req);       /* drop i/o ref to req */
1674        return ret;
1675}
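
/*
 * The IOCB_FLAG_RESFD branch above expects aio_resfd to name an eventfd;
 * each completion then signals it via eventfd_signal().  A hedged userspace
 * sketch (function name hypothetical, ctx obtained from io_setup elsewhere,
 * error handling omitted):
 *
 *        #include <string.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/aio_abi.h>
 *
 *        static long submit_with_eventfd(aio_context_t ctx, int data_fd,
 *                                        void *buf, size_t len, int efd)
 *        {
 *                struct iocb cb, *cbs[1] = { &cb };
 *
 *                memset(&cb, 0, sizeof(cb));
 *                cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *                cb.aio_fildes     = data_fd;
 *                cb.aio_buf        = (unsigned long)buf;
 *                cb.aio_nbytes     = len;
 *                cb.aio_flags      = IOCB_FLAG_RESFD;
 *                cb.aio_resfd      = efd;  // must be an eventfd() descriptor
 *
 *                return syscall(SYS_io_submit, ctx, 1, cbs);
 *        }
 *
 * A caller can then block on the eventfd (read() its 8-byte counter, or poll
 * it) instead of sleeping in io_getevents().
 */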
1676
1677long do_io_submit(aio_context_t ctx_id, long nr,
1678                  struct iocb __user *__user *iocbpp, bool compat)
1679{
1680        struct kioctx *ctx;
1681        long ret = 0;
1682        int i = 0;
1683        struct blk_plug plug;
1684        struct kiocb_batch batch;
1685
1686        if (unlikely(nr < 0))
1687                return -EINVAL;
1688
1689        if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
1690                nr = LONG_MAX/sizeof(*iocbpp);
1691
1692        if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
1693                return -EFAULT;
1694
1695        ctx = lookup_ioctx(ctx_id);
1696        if (unlikely(!ctx)) {
1697                pr_debug("EINVAL: io_submit: invalid context id\n");
1698                return -EINVAL;
1699        }
1700
1701        kiocb_batch_init(&batch, nr);
1702
1703        blk_start_plug(&plug);
1704
1705        /*
1706         * AKPM: should this return a partial result if some of the IOs were
1707         * successfully submitted?
1708         */
1709        for (i=0; i<nr; i++) {
1710                struct iocb __user *user_iocb;
1711                struct iocb tmp;
1712
1713                if (unlikely(__get_user(user_iocb, iocbpp + i))) {
1714                        ret = -EFAULT;
1715                        break;
1716                }
1717
1718                if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
1719                        ret = -EFAULT;
1720                        break;
1721                }
1722
1723                ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
1724                if (ret)
1725                        break;
1726        }
1727        blk_finish_plug(&plug);
1728
1729        kiocb_batch_free(ctx, &batch);
1730        put_ioctx(ctx);
1731        return i ? i : ret;
1732}
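
/*
 * Because of the "return i ? i : ret" above, a batch may be accepted only
 * partially (for instance when requests run out and -EAGAIN is hit); the
 * error itself is returned only when nothing at all was queued.  A hedged
 * sketch of how a caller might drive this (cbs/nr/ctx assumed set up
 * elsewhere; the syscall(2) wrapper reports failure as -1 with errno set):
 *
 *        long done = 0;
 *        while (done < nr) {
 *                long ret = syscall(SYS_io_submit, ctx, nr - done, cbs + done);
 *                if (ret < 0)
 *                        break;          // nothing queued: inspect errno
 *                done += ret;            // short count: resubmit the remainder
 *        }
 */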
1733
1734/* sys_io_submit:
1735 *      Queue the nr iocbs pointed to by iocbpp for processing.  Returns
1736 *      the number of iocbs queued.  May return -EINVAL if the aio_context
1737 *      specified by ctx_id is invalid, if nr is < 0, if the iocb at
1738 *      *iocbpp[0] is not properly initialized, or if the operation specified
1739 *      is invalid for the file descriptor in the iocb.  May fail with
1740 *      -EFAULT if any of the data structures point to invalid data.  May
1741 *      fail with -EBADF if the file descriptor specified in the first
1742 *      iocb is invalid.  May fail with -EAGAIN if insufficient resources
1743 *      are available to queue any iocbs.  Will return 0 if nr is 0.  Will
1744 *      fail with -ENOSYS if not implemented.
1745 */
1746SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1747                struct iocb __user * __user *, iocbpp)
1748{
1749        return do_io_submit(ctx_id, nr, iocbpp, 0);
1750}
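
/*
 * There is no glibc wrapper for these calls, so a minimal userspace sketch
 * uses syscall(2) directly; "fd" is assumed to be an open, readable
 * descriptor and error checking is omitted.  The aio_context_t must be
 * zero before io_setup.
 *
 *        #include <string.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/aio_abi.h>
 *
 *        aio_context_t ctx = 0;
 *        struct iocb cb, *cbs[1] = { &cb };
 *        char buf[4096];
 *
 *        syscall(SYS_io_setup, 128, &ctx);  // ring sized for 128 events
 *
 *        memset(&cb, 0, sizeof(cb));
 *        cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *        cb.aio_fildes     = fd;
 *        cb.aio_buf        = (unsigned long)buf;
 *        cb.aio_nbytes     = sizeof(buf);
 *        cb.aio_offset     = 0;
 *
 *        long n = syscall(SYS_io_submit, ctx, 1, cbs);  // 1 on success
 */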
1751
1752/* lookup_kiocb
1753 *      Finds a given iocb for cancellation.
1754 */
1755static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
1756                                  u32 key)
1757{
1758        struct list_head *pos;
1759
1760        assert_spin_locked(&ctx->ctx_lock);
1761
1762        /* TODO: use a hash or array, this sucks. */
1763        list_for_each(pos, &ctx->active_reqs) {
1764                struct kiocb *kiocb = list_kiocb(pos);
1765                if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
1766                        return kiocb;
1767        }
1768        return NULL;
1769}
1770
1771/* sys_io_cancel:
1772 *      Attempts to cancel an iocb previously passed to io_submit.  If
1773 *      the operation is successfully cancelled, the resulting event is
1774 *      copied into the memory pointed to by result without being placed
1775 *      into the completion queue and 0 is returned.  May fail with
1776 *      -EFAULT if any of the data structures pointed to are invalid.
1777 *      May fail with -EINVAL if the aio_context specified by ctx_id is
1778 *      invalid.  May fail with -EAGAIN if the iocb specified was not
1779 *      cancelled.  Will fail with -ENOSYS if not implemented.
1780 */
1781SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1782                struct io_event __user *, result)
1783{
1784        int (*cancel)(struct kiocb *iocb, struct io_event *res);
1785        struct kioctx *ctx;
1786        struct kiocb *kiocb;
1787        u32 key;
1788        int ret;
1789
1790        ret = get_user(key, &iocb->aio_key);
1791        if (unlikely(ret))
1792                return -EFAULT;
1793
1794        ctx = lookup_ioctx(ctx_id);
1795        if (unlikely(!ctx))
1796                return -EINVAL;
1797
1798        spin_lock_irq(&ctx->ctx_lock);
1799        ret = -EAGAIN;
1800        kiocb = lookup_kiocb(ctx, iocb, key);
1801        if (kiocb && kiocb->ki_cancel) {
1802                cancel = kiocb->ki_cancel;
1803                kiocb->ki_users++;
1804                kiocbSetCancelled(kiocb);
1805        } else
1806                cancel = NULL;
1807        spin_unlock_irq(&ctx->ctx_lock);
1808
1809        if (NULL != cancel) {
1810                struct io_event tmp;
1811                pr_debug("calling cancel\n");
1812                memset(&tmp, 0, sizeof(tmp));
1813                tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
1814                tmp.data = kiocb->ki_user_data;
1815                ret = cancel(kiocb, &tmp);
1816                if (!ret) {
1817                        /* Cancellation succeeded -- copy the result
1818                         * into the user's buffer.
1819                         */
1820                        if (copy_to_user(result, &tmp, sizeof(tmp)))
1821                                ret = -EFAULT;
1822                }
1823        } else
1824                ret = -EINVAL;
1825
1826        put_ioctx(ctx);
1827
1828        return ret;
1829}
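
/*
 * A hedged userspace sketch of the call documented above (ctx and cb refer
 * to a context and an iocb submitted earlier; the syscall(2) wrapper
 * reports failure as -1 with errno set).  With this implementation most
 * in-flight file I/O has no ki_cancel method, so EINVAL is the common
 * outcome.
 *
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/aio_abi.h>
 *
 *        struct io_event res;
 *
 *        if (syscall(SYS_io_cancel, ctx, &cb, &res) == 0) {
 *                // cancelled: the completion is delivered in res and will
 *                // not appear in the ring
 *        } else {
 *                // errno is typically EINVAL (no cancel method or bad
 *                // context) or EAGAIN; the request still completes normally
 *        }
 */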
1830
1831/* io_getevents:
1832 *      Attempts to read at least min_nr events and up to nr events from
1833 *      the completion queue for the aio_context specified by ctx_id. If
1834 *      it succeeds, the number of read events is returned. May fail with
1835 *      -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
1836 *      out of range, if timeout is out of range.  May fail with -EFAULT
1837 *      if any of the memory specified is invalid.  May return 0 or
1838 *      < min_nr if the timeout specified by timeout has elapsed
1839 *      before sufficient events are available, where timeout == NULL
1840 *      specifies an infinite timeout. Note that the timeout pointed to by
1841 *      timeout is relative and will be updated if not NULL and the
1842 *      operation blocks. Will fail with -ENOSYS if not implemented.
1843 */
1844SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
1845                long, min_nr,
1846                long, nr,
1847                struct io_event __user *, events,
1848                struct timespec __user *, timeout)
1849{
1850        struct kioctx *ioctx = lookup_ioctx(ctx_id);
1851        long ret = -EINVAL;
1852
1853        if (likely(ioctx)) {
1854                if (likely(min_nr <= nr && min_nr >= 0))
1855                        ret = read_events(ioctx, min_nr, nr, events, timeout);
1856                put_ioctx(ioctx);
1857        }
1858
1859        asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
1860        return ret;
1861}
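
/*
 * A hedged userspace sketch of harvesting completions (ctx assumed set up
 * by io_setup; the timeout is relative, and passing NULL instead waits
 * indefinitely):
 *
 *        #include <time.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/aio_abi.h>
 *
 *        struct io_event events[16];
 *        struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };  // wait up to 1s
 *
 *        long got = syscall(SYS_io_getevents, ctx, 1, 16, events, &ts);
 *        for (long i = 0; i < got; i++) {
 *                // events[i].obj is the user iocb pointer, events[i].data
 *                // the aio_data cookie, events[i].res the byte count or
 *                // a negative errno for that request
 *        }
 */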
1862