linux/drivers/usb/usb-skeleton.c
/*
 * USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License as
 *      published by the Free Software Foundation, version 2.
 *
 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
 * but has been rewritten to be easier to read and use.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>


/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID      0xfff0
#define USB_SKEL_PRODUCT_ID     0xfff0

/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
        { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
        { }                                     /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);


/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE     192

/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER            (PAGE_SIZE - 512)
/*
 * MAX_TRANSFER is chosen so that the VM is not stressed by
 * allocations > PAGE_SIZE and the number of packets in a page
 * is an integer. 512 is the largest possible packet on EHCI.
 */
#define WRITES_IN_FLIGHT        8
/* arbitrarily chosen */

/* Structure to hold all of our device specific stuff */
struct usb_skel {
        struct usb_device       *udev;                  /* the usb device for this device */
        struct usb_interface    *interface;             /* the interface for this device */
        struct semaphore        limit_sem;              /* limiting the number of writes in progress */
        struct usb_anchor       submitted;              /* in case we need to retract our submissions */
        struct urb              *bulk_in_urb;           /* the urb to read data with */
        unsigned char           *bulk_in_buffer;        /* the buffer to receive data */
        size_t                  bulk_in_size;           /* the size of the receive buffer */
        size_t                  bulk_in_filled;         /* number of bytes in the buffer */
        size_t                  bulk_in_copied;         /* already copied to user space */
        __u8                    bulk_in_endpointAddr;   /* the address of the bulk in endpoint */
        __u8                    bulk_out_endpointAddr;  /* the address of the bulk out endpoint */
        int                     errors;                 /* the last request tanked */
        bool                    ongoing_read;           /* a read is going on */
        spinlock_t              err_lock;               /* lock for errors */
        struct kref             kref;
        struct mutex            io_mutex;               /* synchronize I/O with disconnect */
        wait_queue_head_t       bulk_in_wait;           /* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)

static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);

static void skel_delete(struct kref *kref)
{
        struct usb_skel *dev = to_skel_dev(kref);

        usb_free_urb(dev->bulk_in_urb);
        usb_put_dev(dev->udev);
        kfree(dev->bulk_in_buffer);
        kfree(dev);
}

static int skel_open(struct inode *inode, struct file *file)
{
        struct usb_skel *dev;
        struct usb_interface *interface;
        int subminor;
        int retval = 0;

        subminor = iminor(inode);

        interface = usb_find_interface(&skel_driver, subminor);
        if (!interface) {
                pr_err("%s - error, can't find device for minor %d\n",
                        __func__, subminor);
                retval = -ENODEV;
                goto exit;
        }

        dev = usb_get_intfdata(interface);
        if (!dev) {
                retval = -ENODEV;
                goto exit;
        }

        retval = usb_autopm_get_interface(interface);
        if (retval)
                goto exit;

        /* increment our usage count for the device */
        kref_get(&dev->kref);

        /* save our object in the file's private structure */
        file->private_data = dev;

exit:
        return retval;
}

static int skel_release(struct inode *inode, struct file *file)
{
        struct usb_skel *dev;

        dev = file->private_data;
        if (dev == NULL)
                return -ENODEV;

        /* allow the device to be autosuspended */
        mutex_lock(&dev->io_mutex);
        if (dev->interface)
                usb_autopm_put_interface(dev->interface);
        mutex_unlock(&dev->io_mutex);

        /* decrement the count on our device */
        kref_put(&dev->kref, skel_delete);
        return 0;
}

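/*
 * flush() is called by the VFS whenever a file descriptor for the device
 * is closed: drain outstanding I/O and report any deferred error once.
 */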
static int skel_flush(struct file *file, fl_owner_t id)
{
        struct usb_skel *dev;
        int res;

        dev = file->private_data;
        if (dev == NULL)
                return -ENODEV;

        /* wait for io to stop */
        mutex_lock(&dev->io_mutex);
        skel_draw_down(dev);

        /* read out errors, leave subsequent opens a clean slate */
        spin_lock_irq(&dev->err_lock);
        res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
        dev->errors = 0;
        spin_unlock_irq(&dev->err_lock);

        mutex_unlock(&dev->io_mutex);

        return res;
}

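/*
 * Completion handler for the bulk-in URB: record the result under err_lock,
 * mark the read as finished and wake up anyone sleeping in skel_read().
 */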
static void skel_read_bulk_callback(struct urb *urb)
{
        struct usb_skel *dev;

        dev = urb->context;

        spin_lock(&dev->err_lock);
        /* sync/async unlink faults aren't errors */
        if (urb->status) {
                if (!(urb->status == -ENOENT ||
                    urb->status == -ECONNRESET ||
                    urb->status == -ESHUTDOWN))
                        dev_err(&dev->interface->dev,
                                "%s - nonzero read bulk status received: %d\n",
                                __func__, urb->status);

                dev->errors = urb->status;
        } else {
                dev->bulk_in_filled = urb->actual_length;
        }
        dev->ongoing_read = 0;
        spin_unlock(&dev->err_lock);

        wake_up_interruptible(&dev->bulk_in_wait);
}

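/*
 * Start an asynchronous bulk-in transfer of at most bulk_in_size bytes.
 * The data is picked up later in skel_read() once skel_read_bulk_callback()
 * has run.
 */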
static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
        int rv;

        /* prepare a read */
        usb_fill_bulk_urb(dev->bulk_in_urb,
                        dev->udev,
                        usb_rcvbulkpipe(dev->udev,
                                dev->bulk_in_endpointAddr),
                        dev->bulk_in_buffer,
                        min(dev->bulk_in_size, count),
                        skel_read_bulk_callback,
                        dev);
        /* tell everybody to leave the URB alone */
        spin_lock_irq(&dev->err_lock);
        dev->ongoing_read = 1;
        spin_unlock_irq(&dev->err_lock);

        /* submit bulk in urb, which means no data to deliver */
        dev->bulk_in_filled = 0;
        dev->bulk_in_copied = 0;

        /* do it */
        rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
        if (rv < 0) {
                dev_err(&dev->interface->dev,
                        "%s - failed submitting read urb, error %d\n",
                        __func__, rv);
                rv = (rv == -ENOMEM) ? rv : -EIO;
                spin_lock_irq(&dev->err_lock);
                dev->ongoing_read = 0;
                spin_unlock_irq(&dev->err_lock);
        }

        return rv;
}

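/*
 * read() path: report any deferred error first, hand out whatever the last
 * bulk-in transfer left in the buffer, and start a new transfer once the
 * buffer runs dry. O_NONBLOCK callers get -EAGAIN instead of sleeping.
 */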
static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
                         loff_t *ppos)
{
        struct usb_skel *dev;
        int rv;
        bool ongoing_io;

        dev = file->private_data;

        /* if we cannot read at all, return EOF */
        if (!dev->bulk_in_urb || !count)
                return 0;

        /* no concurrent readers */
        rv = mutex_lock_interruptible(&dev->io_mutex);
        if (rv < 0)
                return rv;

        if (!dev->interface) {          /* disconnect() was called */
                rv = -ENODEV;
                goto exit;
        }

        /* if IO is under way, we must not touch things */
retry:
        spin_lock_irq(&dev->err_lock);
        ongoing_io = dev->ongoing_read;
        spin_unlock_irq(&dev->err_lock);

        if (ongoing_io) {
                /* nonblocking IO shall not wait */
                if (file->f_flags & O_NONBLOCK) {
                        rv = -EAGAIN;
                        goto exit;
                }
                /*
                 * IO may take forever
                 * hence wait in an interruptible state
                 */
                rv = wait_event_interruptible(dev->bulk_in_wait,
                                              (!dev->ongoing_read));
                if (rv < 0)
                        goto exit;
        }

        /* errors must be reported */
        rv = dev->errors;
        if (rv < 0) {
                /* any error is reported once */
                dev->errors = 0;
                /* to preserve notifications about reset */
                rv = (rv == -EPIPE) ? rv : -EIO;
                /* report it */
                goto exit;
        }

        /*
         * if the buffer is filled we may satisfy the read
         * else we need to start IO
         */

        if (dev->bulk_in_filled) {
                /* we had read data */
                size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
                size_t chunk = min(available, count);

                if (!available) {
                        /*
                         * all data has been used
                         * actual IO needs to be done
                         */
                        rv = skel_do_read_io(dev, count);
                        if (rv < 0)
                                goto exit;
                        else
                                goto retry;
                }
                /*
                 * data is available
                 * chunk tells us how much shall be copied
                 */

                if (copy_to_user(buffer,
                                 dev->bulk_in_buffer + dev->bulk_in_copied,
                                 chunk))
                        rv = -EFAULT;
                else
                        rv = chunk;

                dev->bulk_in_copied += chunk;

                /*
                 * if we are asked for more than we have,
                 * we start IO but don't wait
                 */
                if (available < count)
                        skel_do_read_io(dev, count - chunk);
        } else {
                /* no data in the buffer */
                rv = skel_do_read_io(dev, count);
                if (rv < 0)
                        goto exit;
                else if (!(file->f_flags & O_NONBLOCK))
                        goto retry;
                rv = -EAGAIN;
        }
exit:
        mutex_unlock(&dev->io_mutex);
        return rv;
}

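/*
 * Completion handler for write URBs: remember a failure for later reporting,
 * free the DMA-coherent transfer buffer and release one in-flight slot.
 */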
static void skel_write_bulk_callback(struct urb *urb)
{
        struct usb_skel *dev;

        dev = urb->context;

        /* sync/async unlink faults aren't errors */
        if (urb->status) {
                if (!(urb->status == -ENOENT ||
                    urb->status == -ECONNRESET ||
                    urb->status == -ESHUTDOWN))
                        dev_err(&dev->interface->dev,
                                "%s - nonzero write bulk status received: %d\n",
                                __func__, urb->status);

                spin_lock(&dev->err_lock);
                dev->errors = urb->status;
                spin_unlock(&dev->err_lock);
        }

        /* free up our allocated buffer */
        usb_free_coherent(urb->dev, urb->transfer_buffer_length,
                          urb->transfer_buffer, urb->transfer_dma);
        up(&dev->limit_sem);
}

static ssize_t skel_write(struct file *file, const char __user *user_buffer,
                          size_t count, loff_t *ppos)
{
        struct usb_skel *dev;
        int retval = 0;
        struct urb *urb = NULL;
        char *buf = NULL;
        size_t writesize = min(count, (size_t)MAX_TRANSFER);

        dev = file->private_data;

        /* verify that we actually have some data to write */
        if (count == 0)
                goto exit;

        /*
         * limit the number of URBs in flight to stop a user from using up all
         * RAM
         */
        if (!(file->f_flags & O_NONBLOCK)) {
                if (down_interruptible(&dev->limit_sem)) {
                        retval = -ERESTARTSYS;
                        goto exit;
                }
        } else {
                if (down_trylock(&dev->limit_sem)) {
                        retval = -EAGAIN;
                        goto exit;
                }
        }

        spin_lock_irq(&dev->err_lock);
        retval = dev->errors;
        if (retval < 0) {
                /* any error is reported once */
                dev->errors = 0;
                /* to preserve notifications about reset */
                retval = (retval == -EPIPE) ? retval : -EIO;
        }
        spin_unlock_irq(&dev->err_lock);
        if (retval < 0)
                goto error;

        /* create a urb, and a buffer for it, and copy the data to the urb */
        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb) {
                retval = -ENOMEM;
                goto error;
        }

        buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
                                 &urb->transfer_dma);
        if (!buf) {
                retval = -ENOMEM;
                goto error;
        }

        if (copy_from_user(buf, user_buffer, writesize)) {
                retval = -EFAULT;
                goto error;
        }

        /* this lock makes sure we don't submit URBs to gone devices */
        mutex_lock(&dev->io_mutex);
        if (!dev->interface) {          /* disconnect() was called */
                mutex_unlock(&dev->io_mutex);
                retval = -ENODEV;
                goto error;
        }

        /* initialize the urb properly */
        usb_fill_bulk_urb(urb, dev->udev,
                          usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
                          buf, writesize, skel_write_bulk_callback, dev);
        urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        usb_anchor_urb(urb, &dev->submitted);

        /* send the data out the bulk port */
        retval = usb_submit_urb(urb, GFP_KERNEL);
        mutex_unlock(&dev->io_mutex);
        if (retval) {
                dev_err(&dev->interface->dev,
                        "%s - failed submitting write urb, error %d\n",
                        __func__, retval);
                goto error_unanchor;
        }

        /*
         * release our reference to this urb, the USB core will eventually free
         * it entirely
         */
        usb_free_urb(urb);

        return writesize;

error_unanchor:
        usb_unanchor_urb(urb);
error:
        if (urb) {
                usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
                usb_free_urb(urb);
        }
        up(&dev->limit_sem);

exit:
        return retval;
}

static const struct file_operations skel_fops = {
        .owner =        THIS_MODULE,
        .read =         skel_read,
        .write =        skel_write,
        .open =         skel_open,
        .release =      skel_release,
        .flush =        skel_flush,
        .llseek =       noop_llseek,
};

/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
static struct usb_class_driver skel_class = {
        .name =         "skel%d",
        .fops =         &skel_fops,
        .minor_base =   USB_SKEL_MINOR_BASE,
};

static int skel_probe(struct usb_interface *interface,
                      const struct usb_device_id *id)
{
        struct usb_skel *dev;
        struct usb_host_interface *iface_desc;
        struct usb_endpoint_descriptor *endpoint;
        size_t buffer_size;
        int i;
        int retval = -ENOMEM;

        /* allocate memory for our device state and initialize it */
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                dev_err(&interface->dev, "Out of memory\n");
                goto error;
        }
        kref_init(&dev->kref);
        sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
        mutex_init(&dev->io_mutex);
        spin_lock_init(&dev->err_lock);
        init_usb_anchor(&dev->submitted);
        init_waitqueue_head(&dev->bulk_in_wait);

        dev->udev = usb_get_dev(interface_to_usbdev(interface));
        dev->interface = interface;

        /* set up the endpoint information */
        /* use only the first bulk-in and bulk-out endpoints */
        iface_desc = interface->cur_altsetting;
        for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
                endpoint = &iface_desc->endpoint[i].desc;

                if (!dev->bulk_in_endpointAddr &&
                    usb_endpoint_is_bulk_in(endpoint)) {
                        /* we found a bulk in endpoint */
                        buffer_size = usb_endpoint_maxp(endpoint);
                        dev->bulk_in_size = buffer_size;
                        dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
                        dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
                        if (!dev->bulk_in_buffer) {
                                dev_err(&interface->dev,
                                        "Could not allocate bulk_in_buffer\n");
                                goto error;
                        }
                        dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
                        if (!dev->bulk_in_urb) {
                                dev_err(&interface->dev,
                                        "Could not allocate bulk_in_urb\n");
                                goto error;
                        }
                }

                if (!dev->bulk_out_endpointAddr &&
                    usb_endpoint_is_bulk_out(endpoint)) {
                        /* we found a bulk out endpoint */
                        dev->bulk_out_endpointAddr = endpoint->bEndpointAddress;
                }
        }
        if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) {
                dev_err(&interface->dev,
                        "Could not find both bulk-in and bulk-out endpoints\n");
                goto error;
        }

        /* save our data pointer in this interface device */
        usb_set_intfdata(interface, dev);

        /* we can register the device now, as it is ready */
        retval = usb_register_dev(interface, &skel_class);
        if (retval) {
                /* something prevented us from registering this driver */
                dev_err(&interface->dev,
                        "Not able to get a minor for this device.\n");
                usb_set_intfdata(interface, NULL);
                goto error;
        }

        /* let the user know what node this device is now attached to */
        dev_info(&interface->dev,
                 "USB Skeleton device now attached to USBSkel-%d",
                 interface->minor);
        return 0;

error:
        if (dev)
                /* this frees allocated memory */
                kref_put(&dev->kref, skel_delete);
        return retval;
}

static void skel_disconnect(struct usb_interface *interface)
{
        struct usb_skel *dev;
        int minor = interface->minor;

        dev = usb_get_intfdata(interface);
        usb_set_intfdata(interface, NULL);

        /* give back our minor */
        usb_deregister_dev(interface, &skel_class);

        /* prevent more I/O from starting */
        mutex_lock(&dev->io_mutex);
        dev->interface = NULL;
        mutex_unlock(&dev->io_mutex);

        usb_kill_anchored_urbs(&dev->submitted);

        /* decrement our usage count */
        kref_put(&dev->kref, skel_delete);

        dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}

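/*
 * Wait up to a second for anchored write URBs to complete, kill whatever is
 * still outstanding, and cancel an in-flight read.
 */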
static void skel_draw_down(struct usb_skel *dev)
{
        int time;

        time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
        if (!time)
                usb_kill_anchored_urbs(&dev->submitted);
        usb_kill_urb(dev->bulk_in_urb);
}

static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
        struct usb_skel *dev = usb_get_intfdata(intf);

        if (!dev)
                return 0;
        skel_draw_down(dev);
        return 0;
}

static int skel_resume(struct usb_interface *intf)
{
        return 0;
}

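/*
 * pre_reset/post_reset bracket a USB reset: pre_reset stops all I/O and
 * takes io_mutex, which post_reset releases again after flagging -EPIPE so
 * that the next read or write reports the reset to user space.
 */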
static int skel_pre_reset(struct usb_interface *intf)
{
        struct usb_skel *dev = usb_get_intfdata(intf);

        mutex_lock(&dev->io_mutex);
        skel_draw_down(dev);

        return 0;
}

static int skel_post_reset(struct usb_interface *intf)
{
        struct usb_skel *dev = usb_get_intfdata(intf);

        /* we are sure no URBs are active - no locking needed */
        dev->errors = -EPIPE;
        mutex_unlock(&dev->io_mutex);

        return 0;
}

static struct usb_driver skel_driver = {
        .name =         "skeleton",
        .probe =        skel_probe,
        .disconnect =   skel_disconnect,
        .suspend =      skel_suspend,
        .resume =       skel_resume,
        .pre_reset =    skel_pre_reset,
        .post_reset =   skel_post_reset,
        .id_table =     skel_table,
        .supports_autosuspend = 1,
};

module_usb_driver(skel_driver);

MODULE_LICENSE("GPL");