/*
 *      uvc_queue.c  --  USB Video Class driver - Buffers management
 *
 *      Copyright (C) 2005-2009
 *          Laurent Pinchart (laurent.pinchart@skynet.be)
 *
 *      This program is free software; you can redistribute it and/or modify
 *      it under the terms of the GNU General Public License as published by
 *      the Free Software Foundation; either version 2 of the License, or
 *      (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <asm/atomic.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * The video queue is initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
 * uvc_free_buffers respectively. The former acquires the video queue lock,
 * while the latter must be called with the lock held (so that allocation can
 * free previously allocated buffers). Trying to free buffers that are mapped
 * to user space will return -EBUSY.
 *
 * Video buffers are managed using two queues. However, unlike most USB video
 * drivers that use an in queue and an out queue, we use a main queue to hold
 * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
 * hold empty buffers. This design (copied from video-buf) minimizes locking
 * in the interrupt handler, as only one queue is shared between interrupt and
 * user contexts.
 *
 * Use cases
 * ---------
 *
 * Unless stated otherwise, all operations that modify the irq buffers queue
 * are protected by the irq spinlock.
 *
 * 1. The user queues the buffers, starts streaming and dequeues a buffer.
 *
 *    The buffers are added to the main and irq queues. Both operations are
 *    protected by the queue lock, and the latter is protected by the irq
 *    spinlock as well.
 *
 *    The completion handler fetches a buffer from the irq queue and fills it
 *    with video data. If no buffer is available (irq queue empty), the handler
 *    returns immediately.
 *
 *    When the buffer is full, the completion handler removes it from the irq
 *    queue, marks it as done (UVC_BUF_STATE_DONE) and wakes its wait queue.
 *    At that point, any process waiting on the buffer will be woken up. If a
 *    process tries to dequeue a buffer after it has been marked done, the
 *    dequeuing will succeed immediately.
 *
 * 2. Buffers are queued, the user is waiting on a buffer and the device gets
 *    disconnected.
 *
 *    When the device is disconnected, the kernel calls the completion handler
 *    with an appropriate status code. The handler marks all buffers in the
 *    irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
 *    that any process waiting on a buffer gets woken up.
 *
 *    Waking up the first buffer on the irq list is not enough, as the
 *    process waiting on the buffer might restart the dequeue operation
 *    immediately.
 *
 */

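/*
 * Illustrative sketch, not part of the driver: the user space side of use
 * case 1 above, written against the standard V4L2 mmap streaming API.  The
 * device node, buffer count and routing notes on the right are assumptions,
 * and error handling is omitted.
 *
 *      int fd = open("/dev/video0", O_RDWR);
 *      struct v4l2_requestbuffers rb = {
 *              .count  = 4,
 *              .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .memory = V4L2_MEMORY_MMAP,
 *      };
 *      struct v4l2_buffer buf = { .type = rb.type, .memory = rb.memory };
 *      int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *      unsigned int i;
 *
 *      ioctl(fd, VIDIOC_REQBUFS, &rb);                 (uvc_alloc_buffers)
 *      for (i = 0; i < rb.count; ++i) {
 *              buf.index = i;
 *              ioctl(fd, VIDIOC_QUERYBUF, &buf);       (uvc_query_buffer)
 *              mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                   fd, buf.m.offset);
 *              ioctl(fd, VIDIOC_QBUF, &buf);           (uvc_queue_buffer)
 *      }
 *      ioctl(fd, VIDIOC_STREAMON, &type);              (eventually uvc_queue_enable)
 *      ioctl(fd, VIDIOC_DQBUF, &buf);                  (uvc_dequeue_buffer)
 */
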
void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        mutex_init(&queue->mutex);
        spin_lock_init(&queue->irqlock);
        INIT_LIST_HEAD(&queue->mainqueue);
        INIT_LIST_HEAD(&queue->irqqueue);
        queue->type = type;
}

/*
 * Allocate the video buffers.
 *
 * Pages are reserved to make sure they will not be swapped, as they will be
 * filled in the URB completion handler.
 *
 * Buffers will be individually mapped, so they must all be page aligned.
 */
int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
                unsigned int buflength)
{
        unsigned int bufsize = PAGE_ALIGN(buflength);
        unsigned int i;
        void *mem = NULL;
        int ret;

        if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
                nbuffers = UVC_MAX_VIDEO_BUFFERS;

        mutex_lock(&queue->mutex);

        if ((ret = uvc_free_buffers(queue)) < 0)
                goto done;

        /* Bail out if no buffers should be allocated. */
        if (nbuffers == 0)
                goto done;

        /* Decrement the number of buffers until allocation succeeds. */
        for (; nbuffers > 0; --nbuffers) {
                mem = vmalloc_32(nbuffers * bufsize);
                if (mem != NULL)
                        break;
        }

        if (mem == NULL) {
                ret = -ENOMEM;
                goto done;
        }

        for (i = 0; i < nbuffers; ++i) {
                memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
                queue->buffer[i].buf.index = i;
                queue->buffer[i].buf.m.offset = i * bufsize;
                queue->buffer[i].buf.length = buflength;
                queue->buffer[i].buf.type = queue->type;
                queue->buffer[i].buf.sequence = 0;
                queue->buffer[i].buf.field = V4L2_FIELD_NONE;
                queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
                queue->buffer[i].buf.flags = 0;
                init_waitqueue_head(&queue->buffer[i].wait);
        }

        queue->mem = mem;
        queue->count = nbuffers;
        queue->buf_size = bufsize;
        ret = nbuffers;

done:
        mutex_unlock(&queue->mutex);
        return ret;
}
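
/*
 * All buffers live in the single vmalloc_32() area, with buffer i starting
 * at queue->mem + i * queue->buf_size and buf.m.offset set to that byte
 * offset.  A minimal sketch of how an mmap handler could translate the
 * offset passed by user space back into a buffer (the helper name is an
 * assumption, not part of this file):
 *
 *      static struct uvc_buffer *uvc_queue_buffer_at(
 *                      struct uvc_video_queue *queue, unsigned long offset)
 *      {
 *              unsigned int i;
 *
 *              for (i = 0; i < queue->count; ++i) {
 *                      if (queue->buffer[i].buf.m.offset == offset)
 *                              return &queue->buffer[i];
 *              }
 *              return NULL;
 *      }
 *
 * The pages backing the returned buffer then start at queue->mem + offset.
 */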

/*
 * Free the video buffers.
 *
 * This function must be called with the queue lock held.
 */
int uvc_free_buffers(struct uvc_video_queue *queue)
{
        unsigned int i;

        for (i = 0; i < queue->count; ++i) {
                if (queue->buffer[i].vma_use_count != 0)
                        return -EBUSY;
        }

        if (queue->count) {
                vfree(queue->mem);
                queue->count = 0;
        }

        return 0;
}

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
        int allocated;

        mutex_lock(&queue->mutex);
        allocated = queue->count != 0;
        mutex_unlock(&queue->mutex);

        return allocated;
}

static void __uvc_query_buffer(struct uvc_buffer *buf,
                struct v4l2_buffer *v4l2_buf)
{
        memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);

        if (buf->vma_use_count)
                v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;

        switch (buf->state) {
        case UVC_BUF_STATE_ERROR:
        case UVC_BUF_STATE_DONE:
                v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
                break;
        case UVC_BUF_STATE_QUEUED:
        case UVC_BUF_STATE_ACTIVE:
        case UVC_BUF_STATE_READY:
                v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
                break;
        case UVC_BUF_STATE_IDLE:
        default:
                break;
        }
}

int uvc_query_buffer(struct uvc_video_queue *queue,
                struct v4l2_buffer *v4l2_buf)
{
        int ret = 0;

        mutex_lock(&queue->mutex);
        if (v4l2_buf->index >= queue->count) {
                ret = -EINVAL;
                goto done;
        }

        __uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);

done:
        mutex_unlock(&queue->mutex);
        return ret;
}

/*
 * Queue a video buffer. Attempting to queue a buffer that has already been
 * queued will return -EINVAL.
 */
int uvc_queue_buffer(struct uvc_video_queue *queue,
        struct v4l2_buffer *v4l2_buf)
{
        struct uvc_buffer *buf;
        unsigned long flags;
        int ret = 0;

        uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);

        if (v4l2_buf->type != queue->type ||
            v4l2_buf->memory != V4L2_MEMORY_MMAP) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
                        "and/or memory (%u).\n", v4l2_buf->type,
                        v4l2_buf->memory);
                return -EINVAL;
        }

        mutex_lock(&queue->mutex);
        if (v4l2_buf->index >= queue->count) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
                ret = -EINVAL;
                goto done;
        }

        buf = &queue->buffer[v4l2_buf->index];
        if (buf->state != UVC_BUF_STATE_IDLE) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
                        "(%u).\n", buf->state);
                ret = -EINVAL;
                goto done;
        }

        if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            v4l2_buf->bytesused > buf->buf.length) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
                ret = -EINVAL;
                goto done;
        }

        spin_lock_irqsave(&queue->irqlock, flags);
        if (queue->flags & UVC_QUEUE_DISCONNECTED) {
                spin_unlock_irqrestore(&queue->irqlock, flags);
                ret = -ENODEV;
                goto done;
        }
        buf->state = UVC_BUF_STATE_QUEUED;
        if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                buf->buf.bytesused = 0;
        else
                buf->buf.bytesused = v4l2_buf->bytesused;

        list_add_tail(&buf->stream, &queue->mainqueue);
        list_add_tail(&buf->queue, &queue->irqqueue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

done:
        mutex_unlock(&queue->mutex);
        return ret;
}
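
/*
 * Sketch of how a VIDIOC_QBUF handler is expected to use this function (the
 * way the queue is reached from the file handle is an assumption, not how
 * this driver actually stores it):
 *
 *      case VIDIOC_QBUF:
 *              return uvc_queue_buffer(queue, arg);
 */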

static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
{
        if (nonblocking) {
                return (buf->state != UVC_BUF_STATE_QUEUED &&
                        buf->state != UVC_BUF_STATE_ACTIVE &&
                        buf->state != UVC_BUF_STATE_READY)
                        ? 0 : -EAGAIN;
        }

        return wait_event_interruptible(buf->wait,
                buf->state != UVC_BUF_STATE_QUEUED &&
                buf->state != UVC_BUF_STATE_ACTIVE &&
                buf->state != UVC_BUF_STATE_READY);
}

/*
 * Dequeue a video buffer. If nonblocking is false, block until a buffer is
 * available.
 */
int uvc_dequeue_buffer(struct uvc_video_queue *queue,
                struct v4l2_buffer *v4l2_buf, int nonblocking)
{
        struct uvc_buffer *buf;
        int ret = 0;

        if (v4l2_buf->type != queue->type ||
            v4l2_buf->memory != V4L2_MEMORY_MMAP) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
                        "and/or memory (%u).\n", v4l2_buf->type,
                        v4l2_buf->memory);
                return -EINVAL;
        }

        mutex_lock(&queue->mutex);
        if (list_empty(&queue->mainqueue)) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
                ret = -EINVAL;
                goto done;
        }

        buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
        if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
                goto done;

        uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
                buf->buf.index, buf->state, buf->buf.bytesused);

        switch (buf->state) {
        case UVC_BUF_STATE_ERROR:
                uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
                        "(transmission error).\n");
                ret = -EIO;
                /* Fall through. */
        case UVC_BUF_STATE_DONE:
                buf->state = UVC_BUF_STATE_IDLE;
                break;

        case UVC_BUF_STATE_IDLE:
        case UVC_BUF_STATE_QUEUED:
        case UVC_BUF_STATE_ACTIVE:
        case UVC_BUF_STATE_READY:
        default:
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
                        "(driver bug?).\n", buf->state);
                ret = -EINVAL;
                goto done;
        }

        list_del(&buf->stream);
        __uvc_query_buffer(buf, v4l2_buf);

done:
        mutex_unlock(&queue->mutex);
        return ret;
}
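
/*
 * A buffer in the error state is still dequeued: the switch above falls
 * through so the buffer is reset to idle, but -EIO tells the caller that the
 * payload was corrupted by a transmission error and should be discarded.
 *
 * Callers typically derive the nonblocking argument from the file flags, for
 * instance (illustrative sketch, not part of this file):
 *
 *      ret = uvc_dequeue_buffer(queue, buf, file->f_flags & O_NONBLOCK);
 */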

/*
 * Poll the video queue.
 *
 * This function implements video queue polling and is intended to be used by
 * the device poll handler.
 */
unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
                poll_table *wait)
{
        struct uvc_buffer *buf;
        unsigned int mask = 0;

        mutex_lock(&queue->mutex);
        if (list_empty(&queue->mainqueue)) {
                mask |= POLLERR;
                goto done;
        }
        buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);

        poll_wait(file, &buf->wait, wait);
        if (buf->state == UVC_BUF_STATE_DONE ||
            buf->state == UVC_BUF_STATE_ERROR) {
                if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                        mask |= POLLIN | POLLRDNORM;
                else
                        mask |= POLLOUT | POLLWRNORM;
        }

done:
        mutex_unlock(&queue->mutex);
        return mask;
}
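
/*
 * A device poll handler would normally just forward to this function, along
 * the lines of the sketch below (the handler name and the way the queue is
 * reached from the file are assumptions):
 *
 *      static unsigned int my_v4l2_poll(struct file *file, poll_table *wait)
 *      {
 *              struct uvc_video_queue *queue = file->private_data;
 *
 *              return uvc_queue_poll(queue, file, wait);
 *      }
 */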

/*
 * Enable or disable the video buffers queue.
 *
 * The queue must be enabled before starting video acquisition and must be
 * disabled after stopping it. This ensures that the video buffers queue
 * state can be properly initialized before buffers are accessed from the
 * interrupt handler.
 *
 * Enabling the video queue initializes parameters (such as sequence number,
 * sync pattern, ...). If the queue is already enabled, return -EBUSY.
 *
 * Disabling the video queue cancels the queue and removes all buffers from
 * the main queue.
 *
 * This function can't be called from interrupt context. Use
 * uvc_queue_cancel() instead.
 */
int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
{
        unsigned int i;
        int ret = 0;

        mutex_lock(&queue->mutex);
        if (enable) {
                if (uvc_queue_streaming(queue)) {
                        ret = -EBUSY;
                        goto done;
                }
                queue->sequence = 0;
                queue->flags |= UVC_QUEUE_STREAMING;
                queue->buf_used = 0;
        } else {
                uvc_queue_cancel(queue, 0);
                INIT_LIST_HEAD(&queue->mainqueue);

                for (i = 0; i < queue->count; ++i)
                        queue->buffer[i].state = UVC_BUF_STATE_IDLE;

                queue->flags &= ~UVC_QUEUE_STREAMING;
        }

done:
        mutex_unlock(&queue->mutex);
        return ret;
}
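
/*
 * Sketch of the expected pairing with the V4L2 stream ioctls.  The hardware
 * start/stop helpers below are placeholders, not functions of this driver:
 *
 *      case VIDIOC_STREAMON:
 *              ret = uvc_queue_enable(queue, 1);
 *              if (ret < 0)
 *                      return ret;
 *              return my_start_streaming();
 *
 *      case VIDIOC_STREAMOFF:
 *              my_stop_streaming();
 *              return uvc_queue_enable(queue, 0);
 */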

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
        struct uvc_buffer *buf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        while (!list_empty(&queue->irqqueue)) {
                buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                       queue);
                list_del(&buf->queue);
                buf->state = UVC_BUF_STATE_ERROR;
                wake_up(&buf->wait);
        }
        /* This must be protected by the irqlock spinlock to avoid race
         * conditions between uvc_queue_buffer and the disconnection event that
         * could result in an interruptible wait in uvc_dequeue_buffer. Do not
         * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
         * state outside the queue code.
         */
        if (disconnect)
                queue->flags |= UVC_QUEUE_DISCONNECTED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
}
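
/*
 * On disconnection, the driver is expected to cancel the queue with the
 * disconnect flag set so that waiters are woken with an error and any later
 * uvc_queue_buffer call fails with -ENODEV (illustrative call, the queue
 * pointer is whatever the caller holds):
 *
 *      uvc_queue_cancel(queue, 1);
 */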

struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
                struct uvc_buffer *buf)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
            buf->buf.length != buf->buf.bytesused) {
                buf->state = UVC_BUF_STATE_QUEUED;
                buf->buf.bytesused = 0;
                return buf;
        }

        spin_lock_irqsave(&queue->irqlock, flags);
        list_del(&buf->queue);
        buf->state = UVC_BUF_STATE_DONE;
        if (!list_empty(&queue->irqqueue))
                nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                           queue);
        else
                nextbuf = NULL;
        spin_unlock_irqrestore(&queue->irqlock, flags);

        buf->buf.sequence = queue->sequence++;

        wake_up(&buf->wait);
        return nextbuf;
}
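
/*
 * uvc_queue_next_buffer() is meant to be called by the URB completion code
 * once the current buffer holds a complete frame: it either requeues the
 * buffer (when incomplete payloads are being dropped) or marks it done,
 * wakes up any waiter and returns the next buffer to fill.  A minimal sketch
 * of the calling pattern, where the end-of-frame test stands in for the real
 * decoding logic implemented elsewhere in the driver:
 *
 *      if (frame_complete)
 *              buf = uvc_queue_next_buffer(queue, buf);
 */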