linux/drivers/usb/gadget/uvc_queue.c
/*
 *	uvc_queue.c  --  USB Video Class driver - Buffers management
 *
 *	Copyright (C) 2005-2010
 *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License as published by
 *	the Free Software Foundation; either version 2 of the License, or
 *	(at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#include "uvc.h"
/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * The video buffers queue is initialized by uvc_queue_init(). The function
 * performs basic initialization of the uvc_video_queue struct and never
 * fails.
 *
 * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
 * uvc_free_buffers respectively. The former acquires the video queue lock,
 * while the latter must be called with the lock held (so that allocation can
 * free previously allocated buffers). Trying to free buffers that are mapped
 * to user space will return -EBUSY.
 *
 * Video buffers are managed using two queues. However, unlike most USB video
 * drivers that use an in queue and an out queue, we use a main queue to hold
 * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
 * hold empty buffers. This design (copied from video-buf) minimizes locking
 * in interrupt, as only one queue is shared between interrupt and user
 * contexts.
 *
 * Use cases
 * ---------
 *
 * Unless stated otherwise, all operations that modify the irq buffers queue
 * are protected by the irq spinlock.
 *
 * 1. The user queues the buffers, starts streaming and dequeues a buffer.
 *
 *    The buffers are added to the main and irq queues. Both operations are
 *    protected by the queue lock, and the latter is protected by the irq
 *    spinlock as well.
 *
 *    The completion handler fetches a buffer from the irq queue and fills it
 *    with video data. If no buffer is available (irq queue empty), the handler
 *    returns immediately.
 *
 *    When the buffer is full, the completion handler removes it from the irq
 *    queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue.
 *    At that point, any process waiting on the buffer will be woken up. If a
 *    process tries to dequeue a buffer after it has been marked ready, the
 *    dequeuing will succeed immediately.
 *
 * 2. Buffers are queued, the user is waiting on a buffer and the device gets
 *    disconnected.
 *
 *    When the device is disconnected, the kernel calls the completion handler
 *    with an appropriate status code. The handler marks all buffers in the
 *    irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
 *    that any process waiting on a buffer gets woken up.
 *
 *    Waking up the first buffer on the irq list is not enough, as the
 *    process waiting on the buffer might restart the dequeue operation
 *    immediately.
 *
 */
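
/*
 * Illustrative sketch only, not used by the driver: how a queued buffer ends
 * up on both lists, matching use case 1 above. The function name is made up
 * for this example; it relies on the uvc_buffer and uvc_video_queue layouts
 * used throughout this file, and locking is omitted here (the real code in
 * uvc_queue_buffer() below takes the irq spinlock).
 */
static inline void uvc_queue_link_sketch(struct uvc_video_queue *queue,
					 struct uvc_buffer *buf)
{
	/* Main queue: ordering seen by userspace when dequeuing. */
	list_add_tail(&buf->stream, &queue->mainqueue);
	/* Irq queue: buffers the completion handler may still process. */
	list_add_tail(&buf->queue, &queue->irqqueue);
}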

static void
uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	mutex_init(&queue->mutex);
	spin_lock_init(&queue->irqlock);
	INIT_LIST_HEAD(&queue->mainqueue);
	INIT_LIST_HEAD(&queue->irqqueue);
	queue->type = type;
}

/*
 * Free the video buffers.
 *
 * This function must be called with the queue lock held.
 */
static int uvc_free_buffers(struct uvc_video_queue *queue)
{
	unsigned int i;

	for (i = 0; i < queue->count; ++i) {
		if (queue->buffer[i].vma_use_count != 0)
			return -EBUSY;
	}

	if (queue->count) {
		vfree(queue->mem);
		queue->count = 0;
	}

	return 0;
}

/*
 * Allocate the video buffers.
 *
 * Pages are reserved to make sure they will not be swapped, as they will be
 * filled in the USB request completion handler.
 *
 * Buffers will be individually mapped, so they must all be page aligned.
 */
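/*
 * Illustrative arithmetic, assuming 4 KiB pages: a request for 38400-byte
 * buffers is rounded up to bufsize = PAGE_ALIGN(38400) = 40960, so buffer i
 * is placed at offset i * 40960 in the vmalloc'ed area and every buffer
 * starts on a page boundary, as required for per-buffer mmap.
 */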
static int
uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
		  unsigned int buflength)
{
	unsigned int bufsize = PAGE_ALIGN(buflength);
	unsigned int i;
	void *mem = NULL;
	int ret;

	if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
		nbuffers = UVC_MAX_VIDEO_BUFFERS;

	mutex_lock(&queue->mutex);

	if ((ret = uvc_free_buffers(queue)) < 0)
		goto done;

	/* Bail out if no buffers should be allocated. */
	if (nbuffers == 0)
		goto done;

	/* Decrement the number of buffers until allocation succeeds. */
	for (; nbuffers > 0; --nbuffers) {
		mem = vmalloc_32(nbuffers * bufsize);
		if (mem != NULL)
			break;
	}

	if (mem == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	for (i = 0; i < nbuffers; ++i) {
		memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
		queue->buffer[i].buf.index = i;
		queue->buffer[i].buf.m.offset = i * bufsize;
		queue->buffer[i].buf.length = buflength;
		queue->buffer[i].buf.type = queue->type;
		queue->buffer[i].buf.sequence = 0;
		queue->buffer[i].buf.field = V4L2_FIELD_NONE;
		queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
		queue->buffer[i].buf.flags = 0;
		init_waitqueue_head(&queue->buffer[i].wait);
	}

	queue->mem = mem;
	queue->count = nbuffers;
	queue->buf_size = bufsize;
	ret = nbuffers;

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

static void __uvc_query_buffer(struct uvc_buffer *buf,
		struct v4l2_buffer *v4l2_buf)
{
	memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);

	if (buf->vma_use_count)
		v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (buf->state) {
	case UVC_BUF_STATE_ERROR:
	case UVC_BUF_STATE_DONE:
		v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case UVC_BUF_STATE_QUEUED:
	case UVC_BUF_STATE_ACTIVE:
		v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case UVC_BUF_STATE_IDLE:
	default:
		break;
	}
}

static int
uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
{
	int ret = 0;

	mutex_lock(&queue->mutex);
	if (v4l2_buf->index >= queue->count) {
		ret = -EINVAL;
		goto done;
	}

	__uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

/*
 * Queue a video buffer. Attempting to queue a buffer that has already been
 * queued will return -EINVAL. A positive return value indicates that the
 * queue had run empty and was paused; the caller is then expected to resume
 * streaming.
 */
static int
uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
{
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret = 0;

	uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);

	if (v4l2_buf->type != queue->type ||
	    v4l2_buf->memory != V4L2_MEMORY_MMAP) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
			"and/or memory (%u).\n", v4l2_buf->type,
			v4l2_buf->memory);
		return -EINVAL;
	}

	mutex_lock(&queue->mutex);
	if (v4l2_buf->index >= queue->count) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
		ret = -EINVAL;
		goto done;
	}

	buf = &queue->buffer[v4l2_buf->index];
	if (buf->state != UVC_BUF_STATE_IDLE) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
			"(%u).\n", buf->state);
		ret = -EINVAL;
		goto done;
	}

	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    v4l2_buf->bytesused > buf->buf.length) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		ret = -EINVAL;
		goto done;
	}

	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		buf->buf.bytesused = 0;
	else
		buf->buf.bytesused = v4l2_buf->bytesused;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (queue->flags & UVC_QUEUE_DISCONNECTED) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		ret = -ENODEV;
		goto done;
	}
	buf->state = UVC_BUF_STATE_QUEUED;

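	/*
	 * Report a paused queue (the irq queue had run empty, see
	 * uvc_queue_head()) so that the caller can resume streaming.
	 */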
	ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
	queue->flags &= ~UVC_QUEUE_PAUSED;

	list_add_tail(&buf->stream, &queue->mainqueue);
	list_add_tail(&buf->queue, &queue->irqqueue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

/*
 * Wait until a buffer leaves the queued/active states. In nonblocking mode
 * the function returns -EAGAIN immediately instead of sleeping.
 */
static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
{
	if (nonblocking) {
		return (buf->state != UVC_BUF_STATE_QUEUED &&
			buf->state != UVC_BUF_STATE_ACTIVE)
			? 0 : -EAGAIN;
	}

	return wait_event_interruptible(buf->wait,
		buf->state != UVC_BUF_STATE_QUEUED &&
		buf->state != UVC_BUF_STATE_ACTIVE);
}

/*
 * Dequeue a video buffer. If nonblocking is false, block until a buffer is
 * available.
 */
static int
uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf,
		   int nonblocking)
{
	struct uvc_buffer *buf;
	int ret = 0;

	if (v4l2_buf->type != queue->type ||
	    v4l2_buf->memory != V4L2_MEMORY_MMAP) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
			"and/or memory (%u).\n", v4l2_buf->type,
			v4l2_buf->memory);
		return -EINVAL;
	}

	mutex_lock(&queue->mutex);
	if (list_empty(&queue->mainqueue)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
		ret = -EINVAL;
		goto done;
	}

	buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
	if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
		goto done;

	uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
		buf->buf.index, buf->state, buf->buf.bytesused);

	switch (buf->state) {
	case UVC_BUF_STATE_ERROR:
		uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
			"(transmission error).\n");
		ret = -EIO;
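		/* Fall through: the erroneous buffer is still dequeued. */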
	case UVC_BUF_STATE_DONE:
		buf->state = UVC_BUF_STATE_IDLE;
		break;

	case UVC_BUF_STATE_IDLE:
	case UVC_BUF_STATE_QUEUED:
	case UVC_BUF_STATE_ACTIVE:
	default:
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
			"(driver bug?).\n", buf->state);
		ret = -EINVAL;
		goto done;
	}

	list_del(&buf->stream);
	__uvc_query_buffer(buf, v4l2_buf);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}

/*
 * Poll the video queue.
 *
 * This function implements video queue polling and is intended to be used by
 * the device poll handler.
 */
static unsigned int
uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
	       poll_table *wait)
{
	struct uvc_buffer *buf;
	unsigned int mask = 0;

	mutex_lock(&queue->mutex);
	if (list_empty(&queue->mainqueue))
		goto done;

	buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);

	poll_wait(file, &buf->wait, wait);
	if (buf->state == UVC_BUF_STATE_DONE ||
	    buf->state == UVC_BUF_STATE_ERROR)
		mask |= POLLOUT | POLLWRNORM;

done:
	mutex_unlock(&queue->mutex);
	return mask;
}

/*
 * VMA operations.
 */
static void uvc_vm_open(struct vm_area_struct *vma)
{
	struct uvc_buffer *buffer = vma->vm_private_data;
	buffer->vma_use_count++;
}

static void uvc_vm_close(struct vm_area_struct *vma)
{
	struct uvc_buffer *buffer = vma->vm_private_data;
	buffer->vma_use_count--;
}

static struct vm_operations_struct uvc_vm_ops = {
	.open		= uvc_vm_open,
	.close		= uvc_vm_close,
};

/*
 * Memory-map a buffer.
 *
 * This function implements video buffer memory mapping and is intended to be
 * used by the device mmap handler.
 */
static int
uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	struct uvc_buffer *uninitialized_var(buffer);
	struct page *page;
	unsigned long addr, start, size;
	unsigned int i;
	int ret = 0;

	start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;

	mutex_lock(&queue->mutex);

	for (i = 0; i < queue->count; ++i) {
		buffer = &queue->buffer[i];
		if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}

	if (i == queue->count || size != queue->buf_size) {
		ret = -EINVAL;
		goto done;
	}

	/*
	 * VM_IO marks the area as being an mmaped region for I/O to a
	 * device. It also prevents the region from being core dumped.
	 */
	vma->vm_flags |= VM_IO;

	addr = (unsigned long)queue->mem + buffer->buf.m.offset;
	while (size > 0) {
		page = vmalloc_to_page((void *)addr);
		if ((ret = vm_insert_page(vma, start, page)) < 0)
			goto done;

		start += PAGE_SIZE;
		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	vma->vm_ops = &uvc_vm_ops;
	vma->vm_private_data = buffer;
	uvc_vm_open(vma);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}
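
/*
 * Illustrative userspace counterpart of uvc_queue_mmap(), not part of this
 * driver: after VIDIOC_QUERYBUF the application typically maps a buffer with
 *
 *	void *mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, buf.m.offset);
 *
 * buf.m.offset is the per-buffer offset set up in uvc_alloc_buffers(); the
 * kernel rounds the mapping up to a full page, so its size matches the
 * queue->buf_size check above.
 */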

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
static void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
	struct uvc_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	while (!list_empty(&queue->irqqueue)) {
		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
				       queue);
		list_del(&buf->queue);
		buf->state = UVC_BUF_STATE_ERROR;
		wake_up(&buf->wait);
	}
	/* This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvc_queue_buffer and the disconnection event that
	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
	 * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
	 * state outside the queue code.
	 */
	if (disconnect)
		queue->flags |= UVC_QUEUE_DISCONNECTED;
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

/*
 * Enable or disable the video buffers queue.
 *
 * The queue must be enabled before starting video acquisition and must be
 * disabled after stopping it. This ensures that the video buffers queue
 * state can be properly initialized before buffers are accessed from the
 * interrupt handler.
 *
 * Enabling the video queue initializes parameters (such as sequence number,
 * sync pattern, ...). If the queue is already enabled, return -EBUSY.
 *
 * Disabling the video queue cancels the queue and removes all buffers from
 * the main queue.
 *
 * This function can't be called from interrupt context. Use
 * uvc_queue_cancel() instead.
 */
static int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&queue->mutex);
	if (enable) {
		if (uvc_queue_streaming(queue)) {
			ret = -EBUSY;
			goto done;
		}
		queue->sequence = 0;
		queue->flags |= UVC_QUEUE_STREAMING;
		queue->buf_used = 0;
	} else {
		uvc_queue_cancel(queue, 0);
		INIT_LIST_HEAD(&queue->mainqueue);

		for (i = 0; i < queue->count; ++i)
			queue->buffer[i].state = UVC_BUF_STATE_IDLE;

		queue->flags &= ~UVC_QUEUE_STREAMING;
	}

done:
	mutex_unlock(&queue->mutex);
	return ret;
}
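
/*
 * Illustrative ordering sketch, not used by the driver: how the V4L2 side is
 * expected to bracket streaming around buffer I/O, matching the comment
 * above. The function name is made up for this example.
 */
static inline int uvc_queue_stream_sketch(struct uvc_video_queue *queue)
{
	int ret;

	ret = uvc_queue_enable(queue, 1);	/* before any USB transfer */
	if (ret < 0)
		return ret;

	/* ... buffers are queued and processed by the completion handler ... */

	return uvc_queue_enable(queue, 0);	/* cancels and resets buffers */
}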

/*
 * Complete the current buffer: remove it from the irq queue, timestamp it,
 * wake up any process waiting on it and return the next buffer on the irq
 * queue (or NULL if the queue is empty). If UVC_QUEUE_DROP_INCOMPLETE is set
 * and the buffer is not full, the buffer is recycled instead of being
 * completed.
 */
static struct uvc_buffer *
uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
	    buf->buf.length != buf->buf.bytesused) {
		buf->state = UVC_BUF_STATE_QUEUED;
		buf->buf.bytesused = 0;
		return buf;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
	list_del(&buf->queue);
	if (!list_empty(&queue->irqqueue))
		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
					   queue);
	else
		nextbuf = NULL;
	spin_unlock_irqrestore(&queue->irqlock, flags);

	buf->buf.sequence = queue->sequence++;
	do_gettimeofday(&buf->buf.timestamp);

	wake_up(&buf->wait);
	return nextbuf;
}

/*
 * Return the first buffer on the irq queue, or NULL if the queue is empty,
 * in which case the UVC_QUEUE_PAUSED flag is set. The caller is expected to
 * hold the irq spinlock.
 */
static struct uvc_buffer *uvc_queue_head(struct uvc_video_queue *queue)
{
	struct uvc_buffer *buf = NULL;

	if (!list_empty(&queue->irqqueue))
		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
				       queue);
	else
		queue->flags |= UVC_QUEUE_PAUSED;

	return buf;
}
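
/*
 * Illustrative sketch, not used by the driver: the pattern a USB request
 * completion handler is expected to follow with the two helpers above.
 * The function name is made up for this example; actual data handling
 * (filling a capture buffer or draining an output buffer) is elided, and
 * locking is omitted (the caller of uvc_queue_head() normally holds
 * queue->irqlock).
 */
static inline void uvc_queue_complete_sketch(struct uvc_video_queue *queue)
{
	struct uvc_buffer *buf = uvc_queue_head(queue);

	if (buf == NULL)
		return;		/* irq queue empty, UVC_QUEUE_PAUSED is now set */

	/* ... process the buffer data and update buf->buf.bytesused here ... */

	/* Hand the buffer back; the next queued buffer, if any, is returned. */
	buf = uvc_queue_next_buffer(queue, buf);
}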