linux/drivers/media/usb/uvc/uvc_queue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      uvc_queue.c  --  USB Video Class driver - Buffers management
 *
 *      Copyright (C) 2005-2010
 *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

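/*
 * Convert a vb2_v4l2_buffer back to the uvc_buffer that embeds it.
 */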
static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
{
        return container_of(buf, struct uvc_buffer, buf);
}

/*
 * Return all queued buffers to videobuf2 in the requested state.
 *
 * This function must be called with the queue spinlock held.
 */
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
                               enum uvc_buffer_state state)
{
        enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
                                        ? VB2_BUF_STATE_ERROR
                                        : VB2_BUF_STATE_QUEUED;

        while (!list_empty(&queue->irqqueue)) {
                struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
                                                          struct uvc_buffer,
                                                          queue);
                list_del(&buf->queue);
                buf->state = state;
                vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
        }
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

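/*
 * vb2 .queue_setup operation, shared by the video and metadata queues.
 *
 * Compute the minimum buffer size: a fixed size for metadata buffers, the
 * negotiated dwMaxVideoFrameSize for video buffers. When the caller provides
 * plane sizes, only validate them against that minimum.
 */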
static int uvc_queue_setup(struct vb2_queue *vq,
                           unsigned int *nbuffers, unsigned int *nplanes,
                           unsigned int sizes[], struct device *alloc_devs[])
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream;
        unsigned int size;

        switch (vq->type) {
        case V4L2_BUF_TYPE_META_CAPTURE:
                size = UVC_METADATA_BUF_SIZE;
                break;

        default:
                stream = uvc_queue_to_stream(queue);
                size = stream->ctrl.dwMaxVideoFrameSize;
                break;
        }

        /*
         * When called with plane sizes, validate them. The driver supports
         * single planar formats only, and requires buffers to be large enough
         * to store a complete frame.
         */
        if (*nplanes)
                return *nplanes != 1 || sizes[0] < size ? -EINVAL : 0;

        *nplanes = 1;
        sizes[0] = size;
        return 0;
}

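/*
 * vb2 .buf_prepare operation.
 *
 * Validate the payload of output buffers and cache the plane address, size
 * and payload in the uvc_buffer for later use by the streaming code.
 */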
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

        if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
                uvc_dbg(uvc_queue_to_stream(queue)->dev, CAPTURE,
                        "[E] Bytes used out of bounds\n");
                return -EINVAL;
        }

        if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
                return -ENODEV;

        buf->state = UVC_BUF_STATE_QUEUED;
        buf->error = 0;
        buf->mem = vb2_plane_vaddr(vb, 0);
        buf->length = vb2_plane_size(vb, 0);
        if (vb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
                buf->bytesused = 0;
        else
                buf->bytesused = vb2_get_plane_payload(vb, 0);

        return 0;
}

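/*
 * vb2 .buf_queue operation, called with a buffer ready to be filled (or
 * transmitted for output devices).
 *
 * Add the buffer to the driver's irqqueue under the irqlock, or complete it
 * immediately with an error if the device has been disconnected.
 */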
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
                kref_init(&buf->ref);
                list_add_tail(&buf->queue, &queue->irqqueue);
        } else {
                /*
                 * If the device is disconnected, return the buffer to
                 * userspace directly. The next QBUF call will fail with
                 * -ENODEV.
                 */
                buf->state = UVC_BUF_STATE_ERROR;
                vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
        }

        spin_unlock_irqrestore(&queue->irqlock, flags);
}

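/*
 * vb2 .buf_finish operation, called before the buffer is returned to
 * userspace. Run the clock update on successfully completed buffers only.
 */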
static void uvc_buffer_finish(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

        if (vb->state == VB2_BUF_STATE_DONE)
                uvc_video_clock_update(stream, vbuf, buf);
}

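/*
 * vb2 .start_streaming operation for the video queues.
 *
 * Start the video stream. On failure, return all buffers queued so far to
 * videobuf2 in the QUEUED state so userspace can requeue them.
 */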
static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        int ret;

        lockdep_assert_irqs_enabled();

        queue->buf_used = 0;

        ret = uvc_video_start_streaming(stream);
        if (ret == 0)
                return 0;

        spin_lock_irq(&queue->irqlock);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
        spin_unlock_irq(&queue->irqlock);

        return ret;
}

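/*
 * vb2 .stop_streaming operation, shared by the video and metadata queues.
 *
 * Stop the hardware stream (video queues only) and return all buffers still
 * owned by the driver to videobuf2 in the ERROR state.
 */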
static void uvc_stop_streaming(struct vb2_queue *vq)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);

        lockdep_assert_irqs_enabled();

        if (vq->type != V4L2_BUF_TYPE_META_CAPTURE)
                uvc_video_stop_streaming(uvc_queue_to_stream(queue));

        spin_lock_irq(&queue->irqlock);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
        spin_unlock_irq(&queue->irqlock);
}

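/*
 * Two sets of queue operations: one for the video capture/output queues and
 * a reduced one for the metadata capture queue. The metadata queue omits
 * .buf_finish and .start_streaming, presumably because metadata buffers
 * carry no clock information to update and the metadata node has no separate
 * stream to start.
 */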
static const struct vb2_ops uvc_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        .buf_finish = uvc_buffer_finish,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = uvc_start_streaming,
        .stop_streaming = uvc_stop_streaming,
};

static const struct vb2_ops uvc_meta_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .stop_streaming = uvc_stop_streaming,
};

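/*
 * Initialize the videobuf2 queue, the serialization mutex, the irqlock and
 * the driver irqqueue. Metadata queues use the reduced operation set and
 * don't advertise the VB2_DMABUF I/O mode.
 */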
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
                    int drop_corrupted)
{
        int ret;

        queue->queue.type = type;
        queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
        queue->queue.drv_priv = queue;
        queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
        queue->queue.mem_ops = &vb2_vmalloc_memops;
        queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
                | V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
        queue->queue.lock = &queue->mutex;

        switch (type) {
        case V4L2_BUF_TYPE_META_CAPTURE:
                queue->queue.ops = &uvc_meta_queue_qops;
                break;
        default:
                queue->queue.io_modes |= VB2_DMABUF;
                queue->queue.ops = &uvc_queue_qops;
                break;
        }

        ret = vb2_queue_init(&queue->queue);
        if (ret)
                return ret;

        mutex_init(&queue->mutex);
        spin_lock_init(&queue->irqlock);
        INIT_LIST_HEAD(&queue->irqqueue);
        queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;

        return 0;
}

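/*
 * Release the videobuf2 queue and free its buffers, serialized against other
 * queue operations with the queue mutex.
 */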
void uvc_queue_release(struct uvc_video_queue *queue)
{
        mutex_lock(&queue->mutex);
        vb2_queue_release(&queue->queue);
        mutex_unlock(&queue->mutex);
}

/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */

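/*
 * The helpers below are thin wrappers around the corresponding vb2_*()
 * helpers. They serialize the videobuf2 calls with the queue mutex, as
 * described in the locking documentation at the top of the file.
 */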
int uvc_request_buffers(struct uvc_video_queue *queue,
                        struct v4l2_requestbuffers *rb)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_reqbufs(&queue->queue, rb);
        mutex_unlock(&queue->mutex);

        return ret ? ret : rb->count;
}

int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_querybuf(&queue->queue, buf);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_create_buffers(struct uvc_video_queue *queue,
                       struct v4l2_create_buffers *cb)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_create_bufs(&queue->queue, cb);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_buffer(struct uvc_video_queue *queue,
                     struct media_device *mdev, struct v4l2_buffer *buf)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_qbuf(&queue->queue, mdev, buf);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_export_buffer(struct uvc_video_queue *queue,
                      struct v4l2_exportbuffer *exp)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_expbuf(&queue->queue, exp);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
                       int nonblocking)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_streamon(&queue->queue, type);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_streamoff(&queue->queue, type);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
        return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
                unsigned long pgoff)
{
        return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
                            poll_table *wait)
{
        __poll_t ret;

        mutex_lock(&queue->mutex);
        ret = vb2_poll(&queue->queue, file, wait);
        mutex_unlock(&queue->mutex);

        return ret;
}

/* -----------------------------------------------------------------------------
 * Driver-internal buffer handling
 */

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
        int allocated;

        mutex_lock(&queue->mutex);
        allocated = vb2_is_busy(&queue->queue);
        mutex_unlock(&queue->mutex);

        return allocated;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
        /*
         * This must be protected by the irqlock spinlock to avoid race
         * conditions between uvc_buffer_queue and the disconnection event that
         * could result in an interruptible wait in uvc_dequeue_buffer. Do not
         * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
         * state outside the queue code.
         */
        if (disconnect)
                queue->flags |= UVC_QUEUE_DISCONNECTED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
}

/*
 * uvc_queue_get_current_buffer: Obtain the current working output buffer
 *
 * Buffers may span multiple packets and even URBs; the active buffer thus
 * remains on the queue until the EOF marker is received.
 */
static struct uvc_buffer *
__uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
        if (list_empty(&queue->irqqueue))
                return NULL;

        return list_first_entry(&queue->irqqueue, struct uvc_buffer, queue);
}

struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        nextbuf = __uvc_queue_get_current_buffer(queue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

        return nextbuf;
}

/*
 * uvc_queue_buffer_requeue: Requeue a buffer on our internal irqqueue
 *
 * Reuse a buffer through our internal queue without the need to 'prepare'.
 * The buffer will be returned to userspace through the uvc_buffer_queue call
 * if the device has been disconnected.
 */
static void uvc_queue_buffer_requeue(struct uvc_video_queue *queue,
                struct uvc_buffer *buf)
{
        buf->error = 0;
        buf->state = UVC_BUF_STATE_QUEUED;
        buf->bytesused = 0;
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);

        uvc_buffer_queue(&buf->buf.vb2_buf);
}

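/*
 * kref release handler, called from uvc_queue_buffer_release() when the last
 * reference to the buffer is dropped. Requeue corrupted buffers internally
 * when UVC_QUEUE_DROP_CORRUPTED is set, otherwise hand the buffer back to
 * videobuf2.
 */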
static void uvc_queue_buffer_complete(struct kref *ref)
{
        struct uvc_buffer *buf = container_of(ref, struct uvc_buffer, ref);
        struct vb2_buffer *vb = &buf->buf.vb2_buf;
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);

        if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
                uvc_queue_buffer_requeue(queue, buf);
                return;
        }

        buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
        vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

/*
 * Release a reference on the buffer. Complete the buffer when the last
 * reference is released.
 */
void uvc_queue_buffer_release(struct uvc_buffer *buf)
{
        kref_put(&buf->ref, uvc_queue_buffer_complete);
}

/*
 * Remove this buffer from the queue. The buffer's lifetime persists while
 * asynchronous operations (if any) are still running, and
 * uvc_queue_buffer_release() will hand the buffer back to videobuf2 once all
 * users have completed.
 */
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
                struct uvc_buffer *buf)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        list_del(&buf->queue);
        nextbuf = __uvc_queue_get_current_buffer(queue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

        uvc_queue_buffer_release(buf);

        return nextbuf;
}