linux/drivers/block/ub.c
/*
 * The low performance USB storage driver (ub).
 *
 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
 *
 * This work is a part of Linux kernel, is derived from it,
 * and is not licensed separately. See file COPYING for details.
 *
 * TODO (sorted by decreasing priority)
 *  -- Return sense now that rq allows it (we always auto-sense anyway).
 *  -- set readonly flag for CDs, set removable flag for CF readers
 *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
 *  -- verify the 13 conditions and do bulk resets
 *  -- highmem
 *  -- move top_sense and work_bcs into separate allocations (if they survive)
 *     for cache purists and esoteric architectures.
 *  -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
 *  -- prune comments, they are too voluminous
 *  -- Resolve XXX's
 *  -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>

#define DRV_NAME "ub"

#define UB_MAJOR 180

/*
 * The command state machine is the key model for understanding this driver.
 *
 * The general rule is that all transitions are done towards the bottom
 * of the diagram, thus preventing any loops.
 *
 * An exception to that is how the STAT state is handled. A counter allows it
 * to be re-entered along the path marked with [C].
 *
 *       +--------+
 *       ! INIT   !
 *       +--------+
 *           !
 *        ub_scsi_cmd_start fails ->--------------------------------------\
 *           !                                                            !
 *           V                                                            !
 *       +--------+                                                       !
 *       ! CMD    !                                                       !
 *       +--------+                                                       !
 *           !                                            +--------+      !
 *         was -EPIPE -->-------------------------------->! CLEAR  !      !
 *           !                                            +--------+      !
 *           !                                                !           !
 *         was error -->------------------------------------- ! --------->\
 *           !                                                !           !
 *  /--<-- cmd->dir == NONE ?                                 !           !
 *  !        !                                                !           !
 *  !        V                                                !           !
 *  !    +--------+                                           !           !
 *  !    ! DATA   !                                           !           !
 *  !    +--------+                                           !           !
 *  !        !                           +---------+          !           !
 *  !      was -EPIPE -->--------------->! CLR2STS !          !           !
 *  !        !                           +---------+          !           !
 *  !        !                                !               !           !
 *  !        !                              was error -->---- ! --------->\
 *  !      was error -->--------------------- ! ------------- ! --------->\
 *  !        !                                !               !           !
 *  !        V                                !               !           !
 *  \--->+--------+                           !               !           !
 *       ! STAT   !<--------------------------/               !           !
 *  /--->+--------+                                           !           !
 *  !        !                                                !           !
 * [C]     was -EPIPE -->-----------\                         !           !
 *  !        !                      !                         !           !
 *  +<---- len == 0                 !                         !           !
 *  !        !                      !                         !           !
 *  !      was error -->--------------------------------------!---------->\
 *  !        !                      !                         !           !
 *  +<---- bad CSW                  !                         !           !
 *  +<---- bad tag                  !                         !           !
 *  !        !                      V                         !           !
 *  !        !                 +--------+                     !           !
 *  !        !                 ! CLRRS  !                     !           !
 *  !        !                 +--------+                     !           !
 *  !        !                      !                         !           !
 *  \------- ! --------------------[C]--------\               !           !
 *           !                                !               !           !
 *         cmd->error---\                +--------+           !           !
 *           !          +--------------->! SENSE  !<----------/           !
 *         STAT_FAIL----/                +--------+                       !
 *           !                                !                           V
 *           !                                V                      +--------+
 *           \--------------------------------\--------------------->! DONE   !
 *                                                                   +--------+
 */
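/*
 * The states in this diagram correspond to enum ub_scsi_cmd_state below;
 * most of the transitions are driven by ub_scsi_urb_compl() as each URB
 * completes.
 */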

/*
 * This many LUNs per USB device.
 * Every one of them takes a host, see UB_MAX_HOSTS.
 */
#define UB_MAX_LUNS   9

/*
 */

#define UB_PARTS_PER_LUN      8

#define UB_MAX_CDB_SIZE      16         /* Corresponds to Bulk */

#define UB_SENSE_SIZE  18

/*
 */

/* command block wrapper */
struct bulk_cb_wrap {
        __le32  Signature;              /* contains 'USBC' */
        u32     Tag;                    /* unique per command id */
        __le32  DataTransferLength;     /* size of data */
        u8      Flags;                  /* direction in bit 7 */
        u8      Lun;                    /* LUN */
        u8      Length;                 /* length of the CDB */
        u8      CDB[UB_MAX_CDB_SIZE];   /* max command */
};

#define US_BULK_CB_WRAP_LEN     31
#define US_BULK_CB_SIGN         0x43425355      /* spells out 'USBC' */
#define US_BULK_FLAG_IN         1
#define US_BULK_FLAG_OUT        0
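/*
 * Note that ub_scsi_cmd_start() below sets the direction bit by writing
 * 0x80 (bit 7) into Flags directly; the two flag defines above are not
 * referenced in the code shown here.
 */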

/* command status wrapper */
struct bulk_cs_wrap {
        __le32  Signature;              /* should = 'USBS' */
        u32     Tag;                    /* same as original command */
        __le32  Residue;                /* amount not transferred */
        u8      Status;                 /* see below */
};

#define US_BULK_CS_WRAP_LEN     13
#define US_BULK_CS_SIGN         0x53425355      /* spells out 'USBS' */
#define US_BULK_STAT_OK         0
#define US_BULK_STAT_FAIL       1
#define US_BULK_STAT_PHASE      2

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST   0xff
#define US_BULK_GET_MAX_LUN     0xfe

/*
 */
struct ub_dev;

#define UB_MAX_REQ_SG   9       /* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT  (HZ*2)
#define UB_DATA_TIMEOUT (HZ*5)  /* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT (HZ*5)  /* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT (HZ/2)  /* 500ms ought to be enough to clear a stall */

/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE     0
#define UB_DIR_READ     1
#define UB_DIR_ILLEGAL2 2
#define UB_DIR_WRITE    3

#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
                         (((c)==UB_DIR_READ)? 'r': 'n'))

enum ub_scsi_cmd_state {
        UB_CMDST_INIT,                  /* Initial state */
        UB_CMDST_CMD,                   /* Command submitted */
        UB_CMDST_DATA,                  /* Data phase */
        UB_CMDST_CLR2STS,               /* Clearing before requesting status */
        UB_CMDST_STAT,                  /* Status phase */
        UB_CMDST_CLEAR,                 /* Clearing a stall (halt, actually) */
        UB_CMDST_CLRRS,                 /* Clearing before retrying status */
        UB_CMDST_SENSE,                 /* Sending Request Sense */
        UB_CMDST_DONE                   /* Final state */
};

struct ub_scsi_cmd {
        unsigned char cdb[UB_MAX_CDB_SIZE];
        unsigned char cdb_len;

        unsigned char dir;              /* 0 - none, 1 - read, 3 - write. */
        enum ub_scsi_cmd_state state;
        unsigned int tag;
        struct ub_scsi_cmd *next;

        int error;                      /* Return code - valid upon done */
        unsigned int act_len;           /* Return size */
        unsigned char key, asc, ascq;   /* May be valid if error==-EIO */

        int stat_count;                 /* Retries getting status. */
        unsigned int timeo;             /* jiffies until rq->timeout changes */

        unsigned int len;               /* Requested length */
        unsigned int current_sg;
        unsigned int nsg;               /* sgv[nsg] */
        struct scatterlist sgv[UB_MAX_REQ_SG];

        struct ub_lun *lun;
        void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
        void *back;
};

struct ub_request {
        struct request *rq;
        unsigned int current_try;
        unsigned int nsg;               /* sgv[nsg] */
        struct scatterlist sgv[UB_MAX_REQ_SG];
};

/*
 */
struct ub_capacity {
        unsigned long nsec;             /* Linux size - 512 byte sectors */
        unsigned int bsize;             /* Linux hardsect_size */
        unsigned int bshift;            /* Shift between 512 and hard sects */
};
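/*
 * (For a CD-ROM, for instance, bsize is 2048 and bshift is 2,
 * since 2048 == 512 << 2.)
 */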

/*
 * This is a direct take-off from linux/include/completion.h
 * The difference is that I do not wait on this thing, just poll.
 * When I want to wait (ub_probe), I just use the stock completion.
 *
 * Note that INIT_COMPLETION takes no lock. It is correct. But why
 * in the bloody hell that thing takes a struct instead of a pointer to one
 * is quite beyond me. I just copied it from the stock completion.
 */
struct ub_completion {
        unsigned int done;
        spinlock_t lock;
};

static inline void ub_init_completion(struct ub_completion *x)
{
        x->done = 0;
        spin_lock_init(&x->lock);
}

#define UB_INIT_COMPLETION(x)   ((x).done = 0)

static void ub_complete(struct ub_completion *x)
{
        unsigned long flags;

        spin_lock_irqsave(&x->lock, flags);
        x->done++;
        spin_unlock_irqrestore(&x->lock, flags);
}

static int ub_is_completed(struct ub_completion *x)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&x->lock, flags);
        ret = x->done;
        spin_unlock_irqrestore(&x->lock, flags);
        return ret;
}
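/*
 * (ub_scsi_dispatch() polls ub_is_completed() from tasklet context,
 * where sleeping on a stock completion would not be allowed.)
 */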

/*
 */
struct ub_scsi_cmd_queue {
        int qlen, qmax;
        struct ub_scsi_cmd *head, *tail;
};

/*
 * The block device instance (one per LUN).
 */
struct ub_lun {
        struct ub_dev *udev;
        struct list_head link;
        struct gendisk *disk;
        int id;                         /* Host index */
        int num;                        /* LUN number */
        char name[16];

        int changed;                    /* Media was changed */
        int removable;
        int readonly;

        struct ub_request urq;

        /* Use Ingo's mempool if or when we have more than one command. */
        /*
         * Currently we never need more than one command for the whole device.
         * However, giving every LUN a command is a cheap and automatic way
         * to enforce fairness between them.
         */
        int cmda[1];
        struct ub_scsi_cmd cmdv[1];

        struct ub_capacity capacity;
};

/*
 * The USB device instance.
 */
struct ub_dev {
        spinlock_t *lock;
        atomic_t poison;                /* The USB device is disconnected */
        int openc;                      /* protected by ub_lock! */
                                        /* kref is too implicit for our taste */
        int reset;                      /* Reset is running */
        int bad_resid;
        unsigned int tagcnt;
        char name[12];
        struct usb_device *dev;
        struct usb_interface *intf;

        struct list_head luns;

        unsigned int send_bulk_pipe;    /* cached pipe values */
        unsigned int recv_bulk_pipe;
        unsigned int send_ctrl_pipe;
        unsigned int recv_ctrl_pipe;

        struct tasklet_struct tasklet;

        struct ub_scsi_cmd_queue cmd_queue;
        struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
        unsigned char top_sense[UB_SENSE_SIZE];

        struct ub_completion work_done;
        struct urb work_urb;
        struct timer_list work_timer;
        int last_pipe;                  /* What might need clearing */
        __le32 signature;               /* Learned signature */
        struct bulk_cb_wrap work_bcb;
        struct bulk_cs_wrap work_bcs;
        struct usb_ctrlrequest work_cr;

        struct work_struct reset_work;
        wait_queue_head_t reset_wait;
};

/*
 */
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status,
    unsigned int cmd_len);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);

/*
 */
#ifdef CONFIG_USB_LIBUSUAL

#define ub_usb_ids  usb_storage_usb_ids
#else

static struct usb_device_id ub_usb_ids[] = {
        { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
        { }
};

MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif /* CONFIG_USB_LIBUSUAL */

/*
 * Find me a way to identify "next free minor" for add_disk(),
 * and the array disappears the next day. However, the number of
 * hosts has something to do with the naming and /proc/partitions.
 * This has to be thought out in detail before changing.
 * If UB_MAX_HOSTS were 1000, we'd use a bitmap. Or a better data structure.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];

#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];
static int ub_qlock_next = 0;

static DEFINE_SPINLOCK(ub_lock);        /* Locks globals and ->openc */

/*
 * The id allocator.
 *
 * This also stores the host for indexing by minor, which is somewhat dirty.
 */
static int ub_id_get(void)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ub_lock, flags);
        for (i = 0; i < UB_MAX_HOSTS; i++) {
                if (ub_hostv[i] == 0) {
                        ub_hostv[i] = 1;
                        spin_unlock_irqrestore(&ub_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&ub_lock, flags);
        return -1;
}

static void ub_id_put(int id)
{
        unsigned long flags;

        if (id < 0 || id >= UB_MAX_HOSTS) {
                printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
                return;
        }

        spin_lock_irqsave(&ub_lock, flags);
        if (ub_hostv[id] == 0) {
                spin_unlock_irqrestore(&ub_lock, flags);
                printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
                return;
        }
        ub_hostv[id] = 0;
        spin_unlock_irqrestore(&ub_lock, flags);
}

/*
 * This is necessitated by the fact that blk_cleanup_queue does not
 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
 * Since our blk_init_queue() passes a spinlock common with ub_dev,
 * we have lifetime issues when ub_cleanup frees ub_dev.
 */
static spinlock_t *ub_next_lock(void)
{
        unsigned long flags;
        spinlock_t *ret;

        spin_lock_irqsave(&ub_lock, flags);
        ret = &ub_qlockv[ub_qlock_next];
        ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
        spin_unlock_irqrestore(&ub_lock, flags);
        return ret;
}

/*
 * Downcount for deallocation. This rides on two assumptions:
 *  - once something is poisoned, its refcount cannot grow
 *  - opens cannot happen at this time (del_gendisk was done)
 * If the above is true, we can drop the lock, which we need for
 * blk_cleanup_queue(): the silly thing may attempt to sleep.
 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
 */
static void ub_put(struct ub_dev *sc)
{
        unsigned long flags;

        spin_lock_irqsave(&ub_lock, flags);
        --sc->openc;
        if (sc->openc == 0 && atomic_read(&sc->poison)) {
                spin_unlock_irqrestore(&ub_lock, flags);
                ub_cleanup(sc);
        } else {
                spin_unlock_irqrestore(&ub_lock, flags);
        }
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
        struct list_head *p;
        struct ub_lun *lun;
        struct request_queue *q;

        while (!list_empty(&sc->luns)) {
                p = sc->luns.next;
                lun = list_entry(p, struct ub_lun, link);
                list_del(p);

                /* I don't think queue can be NULL. But... Stolen from sx8.c */
                if ((q = lun->disk->queue) != NULL)
                        blk_cleanup_queue(q);
                /*
                 * If we zero disk->private_data BEFORE put_disk, we have
                 * to check for NULL all over the place in open, release,
                 * check_media and revalidate, because the block level
                 * semaphore is well inside the put_disk.
                 * But we cannot zero after the call, because *disk is gone.
                 * The sd.c is blatantly racy in this area.
                 */
                /* disk->private_data = NULL; */
                put_disk(lun->disk);
                lun->disk = NULL;

                ub_id_put(lun->id);
                kfree(lun);
        }

        usb_set_intfdata(sc->intf, NULL);
        usb_put_intf(sc->intf);
        usb_put_dev(sc->dev);
        kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
        struct ub_scsi_cmd *ret;

        if (lun->cmda[0])
                return NULL;
        ret = &lun->cmdv[0];
        lun->cmda[0] = 1;
        return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
        if (cmd != &lun->cmdv[0]) {
                printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
                    lun->name, cmd);
                return;
        }
        if (!lun->cmda[0]) {
                printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
                return;
        }
        lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

        if (t->qlen++ == 0) {
                t->head = cmd;
                t->tail = cmd;
        } else {
                t->tail->next = cmd;
                t->tail = cmd;
        }

        if (t->qlen > t->qmax)
                t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

        if (t->qlen++ == 0) {
                t->head = cmd;
                t->tail = cmd;
        } else {
                cmd->next = t->head;
                t->head = cmd;
        }

        if (t->qlen > t->qmax)
                t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
        struct ub_scsi_cmd *cmd;

        if (t->qlen == 0)
                return NULL;
        if (--t->qlen == 0)
                t->tail = NULL;
        cmd = t->head;
        t->head = cmd->next;
        cmd->next = NULL;
        return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
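/*
 * ub_cmdq_add() appends at the tail; ub_cmdq_insert() pushes at the head,
 * which ub_state_sense() uses to run a REQUEST SENSE ahead of the command
 * that triggered it.
 */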

/*
 * The request function is our main entry point
 */

static void ub_request_fn(struct request_queue *q)
{
        struct ub_lun *lun = q->queuedata;
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (ub_request_fn_1(lun, rq) != 0) {
                        blk_stop_queue(q);
                        break;
                }
        }
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
        struct ub_dev *sc = lun->udev;
        struct ub_scsi_cmd *cmd;
        struct ub_request *urq;
        int n_elem;

        if (atomic_read(&sc->poison)) {
                blkdev_dequeue_request(rq);
                ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
                return 0;
        }

        if (lun->changed && !blk_pc_request(rq)) {
                blkdev_dequeue_request(rq);
                ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
                return 0;
        }

        if (lun->urq.rq != NULL)
                return -1;
        if ((cmd = ub_get_cmd(lun)) == NULL)
                return -1;
        memset(cmd, 0, sizeof(struct ub_scsi_cmd));

        blkdev_dequeue_request(rq);

        urq = &lun->urq;
        memset(urq, 0, sizeof(struct ub_request));
        urq->rq = rq;

        /*
         * get scatterlist from block layer
         */
        sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
        n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
        if (n_elem < 0) {
                /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
                printk(KERN_INFO "%s: failed request map (%d)\n",
                    lun->name, n_elem);
                goto drop;
        }
        if (n_elem > UB_MAX_REQ_SG) {   /* Paranoia */
                printk(KERN_WARNING "%s: request with %d segments\n",
                    lun->name, n_elem);
                goto drop;
        }
        urq->nsg = n_elem;

        if (blk_pc_request(rq)) {
                ub_cmd_build_packet(sc, lun, cmd, urq);
        } else {
                ub_cmd_build_block(sc, lun, cmd, urq);
        }
        cmd->state = UB_CMDST_INIT;
        cmd->lun = lun;
        cmd->done = ub_rw_cmd_done;
        cmd->back = urq;

        cmd->tag = sc->tagcnt++;
        if (ub_submit_scsi(sc, cmd) != 0)
                goto drop;

        return 0;

drop:
        ub_put_cmd(lun, cmd);
        ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
        return 0;
}

static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
        struct request *rq = urq->rq;
        unsigned int block, nblks;

        if (rq_data_dir(rq) == WRITE)
                cmd->dir = UB_DIR_WRITE;
        else
                cmd->dir = UB_DIR_READ;

        cmd->nsg = urq->nsg;
        memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

        /*
         * build the command
         *
         * The call to blk_queue_hardsect_size() guarantees that request
         * is aligned, but it is given in terms of 512 byte units, always.
         */
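        /*
         * For instance, on a device with 2048-byte hard sectors
         * (bshift == 2), a request at 512-byte sector 8 for 16 sectors
         * becomes block 2, nblks 4.
         */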
        block = rq->sector >> lun->capacity.bshift;
        nblks = rq->nr_sectors >> lun->capacity.bshift;

        cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
        /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
        cmd->cdb[2] = block >> 24;
        cmd->cdb[3] = block >> 16;
        cmd->cdb[4] = block >> 8;
        cmd->cdb[5] = block;
        cmd->cdb[7] = nblks >> 8;
        cmd->cdb[8] = nblks;
        cmd->cdb_len = 10;

        cmd->len = rq->nr_sectors * 512;
}

static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
        struct request *rq = urq->rq;

        if (rq->data_len == 0) {
                cmd->dir = UB_DIR_NONE;
        } else {
                if (rq_data_dir(rq) == WRITE)
                        cmd->dir = UB_DIR_WRITE;
                else
                        cmd->dir = UB_DIR_READ;
        }

        cmd->nsg = urq->nsg;
        memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

        memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
        cmd->cdb_len = rq->cmd_len;

        cmd->len = rq->data_len;

        /*
         * To reapply this to every URB is not as incorrect as it looks.
         * In return, we avoid any complicated tracking calculations.
         */
        cmd->timeo = rq->timeout;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_lun *lun = cmd->lun;
        struct ub_request *urq = cmd->back;
        struct request *rq;
        unsigned int scsi_status;
        unsigned int cmd_len;

        rq = urq->rq;

        if (cmd->error == 0) {
                if (blk_pc_request(rq)) {
                        if (cmd->act_len >= rq->data_len)
                                rq->data_len = 0;
                        else
                                rq->data_len -= cmd->act_len;
                        scsi_status = 0;
                } else {
                        if (cmd->act_len != cmd->len) {
                                scsi_status = SAM_STAT_CHECK_CONDITION;
                        } else {
                                scsi_status = 0;
                        }
                }
        } else {
                if (blk_pc_request(rq)) {
                        /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
                        memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
                        rq->sense_len = UB_SENSE_SIZE;
                        if (sc->top_sense[0] != 0)
                                scsi_status = SAM_STAT_CHECK_CONDITION;
                        else
                                scsi_status = DID_ERROR << 16;
                } else {
                        if (cmd->error == -EIO &&
                            (cmd->key == 0 ||
                             cmd->key == MEDIUM_ERROR ||
                             cmd->key == UNIT_ATTENTION)) {
                                if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
                                        return;
                        }
                        scsi_status = SAM_STAT_CHECK_CONDITION;
                }
        }

        urq->rq = NULL;

        cmd_len = cmd->len;
        ub_put_cmd(lun, cmd);
        ub_end_rq(rq, scsi_status, cmd_len);
        blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, unsigned int scsi_status,
    unsigned int cmd_len)
{
        int error;
        long rqlen;

        if (scsi_status == 0) {
                error = 0;
        } else {
                error = -EIO;
                rq->errors = scsi_status;
        }
        rqlen = blk_rq_bytes(rq);    /* Oddly enough, this is the residue. */
        if (__blk_end_request(rq, error, cmd_len)) {
                printk(KERN_WARNING DRV_NAME
                    ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
                    blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
        }
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{

        if (atomic_read(&sc->poison))
                return -ENXIO;

        ub_reset_enter(sc, urq->current_try);

        if (urq->current_try >= 3)
                return -EIO;
        urq->current_try++;

        /* Remove this if anyone complains of flooding. */
        printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
            "[sense %x %02x %02x] retry %d\n",
            sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
            cmd->key, cmd->asc, cmd->ascq, urq->current_try);

        memset(cmd, 0, sizeof(struct ub_scsi_cmd));
        ub_cmd_build_block(sc, lun, cmd, urq);

        cmd->state = UB_CMDST_INIT;
        cmd->lun = lun;
        cmd->done = ub_rw_cmd_done;
        cmd->back = urq;

        cmd->tag = sc->tagcnt++;

#if 0 /* Wasteful */
        return ub_submit_scsi(sc, cmd);
#else
        ub_cmdq_add(sc, cmd);
        return 0;
#endif
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (cmd->state != UB_CMDST_INIT ||
            (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
                return -EINVAL;
        }

        ub_cmdq_add(sc, cmd);
        /*
         * We can call ub_scsi_dispatch(sc) right away here, but it's a little
         * safer to jump to a tasklet, in case upper layers do something silly.
         */
        tasklet_schedule(&sc->tasklet);
        return 0;
}

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct bulk_cb_wrap *bcb;
        int rc;

        bcb = &sc->work_bcb;

        /*
         * ``If the allocation length is eighteen or greater, and a device
         * server returns less than eighteen bytes of data, the application
         * client should assume that the bytes not transferred would have been
         * zeroes had the device server returned those bytes.''
         *
         * We zero sense for all commands so that when a packet request
         * fails it does not return a stale sense.
         */
        memset(&sc->top_sense, 0, UB_SENSE_SIZE);

        /* set up the command wrapper */
        bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
        bcb->Tag = cmd->tag;            /* Endianness is not important */
        bcb->DataTransferLength = cpu_to_le32(cmd->len);
        bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
        bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
        bcb->Length = cmd->cdb_len;

        /* copy the command payload */
        memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

        UB_INIT_COMPLETION(sc->work_done);

        sc->last_pipe = sc->send_bulk_pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
            bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                return rc;
        }

        sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
        add_timer(&sc->work_timer);

        cmd->state = UB_CMDST_CMD;
        return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
        struct ub_dev *sc = (struct ub_dev *) arg;
        unsigned long flags;

        spin_lock_irqsave(sc->lock, flags);
        if (!ub_is_completed(&sc->work_done))
                usb_unlink_urb(&sc->work_urb);
        spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb)
{
        struct ub_dev *sc = urb->context;

        ub_complete(&sc->work_done);
        tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
        struct ub_dev *sc = (struct ub_dev *) _dev;
        unsigned long flags;

        spin_lock_irqsave(sc->lock, flags);
        ub_scsi_dispatch(sc);
        spin_unlock_irqrestore(sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
        struct ub_scsi_cmd *cmd;
        int rc;

        while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
                if (cmd->state == UB_CMDST_DONE) {
                        ub_cmdq_pop(sc);
                        (*cmd->done)(sc, cmd);
                } else if (cmd->state == UB_CMDST_INIT) {
                        if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
                                break;
                        cmd->error = rc;
                        cmd->state = UB_CMDST_DONE;
                } else {
                        if (!ub_is_completed(&sc->work_done))
                                break;
                        del_timer(&sc->work_timer);
                        ub_scsi_urb_compl(sc, cmd);
                }
        }
}

static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct urb *urb = &sc->work_urb;
        struct bulk_cs_wrap *bcs;
        int endp;
        int len;
        int rc;

        if (atomic_read(&sc->poison)) {
                ub_state_done(sc, cmd, -ENODEV);
                return;
        }

        endp = usb_pipeendpoint(sc->last_pipe);
        if (usb_pipein(sc->last_pipe))
                endp |= USB_DIR_IN;

        if (cmd->state == UB_CMDST_CLEAR) {
                if (urb->status == -EPIPE) {
                        /*
                         * STALL while clearing STALL.
                         * The control pipe clears itself - nothing to do.
                         */
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                usb_reset_endpoint(sc->dev, endp);

                ub_state_sense(sc, cmd);

        } else if (cmd->state == UB_CMDST_CLR2STS) {
                if (urb->status == -EPIPE) {
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                usb_reset_endpoint(sc->dev, endp);

                ub_state_stat(sc, cmd);

        } else if (cmd->state == UB_CMDST_CLRRS) {
                if (urb->status == -EPIPE) {
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                usb_reset_endpoint(sc->dev, endp);

                ub_state_stat_counted(sc, cmd);

        } else if (cmd->state == UB_CMDST_CMD) {
                switch (urb->status) {
                case 0:
                        break;
                case -EOVERFLOW:
                        goto Bad_End;
                case -EPIPE:
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                /*
                                 * This is typically ENOMEM or some other such shit.
                                 * Retrying is pointless. Just do Bad End on it...
                                 */
                                ub_state_done(sc, cmd, rc);
                                return;
                        }
                        cmd->state = UB_CMDST_CLEAR;
                        return;
                case -ESHUTDOWN:        /* unplug */
                case -EILSEQ:           /* unplug timeout on uhci */
                        ub_state_done(sc, cmd, -ENODEV);
                        return;
                default:
                        goto Bad_End;
                }
                if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
                        goto Bad_End;
                }

                if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
                        ub_state_stat(sc, cmd);
                        return;
                }

                // udelay(125);         // usb-storage has this
                ub_data_start(sc, cmd);

        } else if (cmd->state == UB_CMDST_DATA) {
                if (urb->status == -EPIPE) {
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                ub_state_done(sc, cmd, rc);
                                return;
                        }
                        cmd->state = UB_CMDST_CLR2STS;
                        return;
                }
                if (urb->status == -EOVERFLOW) {
                        /*
                         * A babble? Failure, but we must transfer CSW now.
                         */
                        cmd->error = -EOVERFLOW;        /* A cheap trick... */
                        ub_state_stat(sc, cmd);
                        return;
                }

                if (cmd->dir == UB_DIR_WRITE) {
                        /*
                         * Do not continue writes in case of a failure.
                         * Doing so would cause sectors to be mixed up,
                         * which is worse than sectors lost.
                         *
                         * We must try to read the CSW, or many devices
                         * get confused.
                         */
                        len = urb->actual_length;
                        if (urb->status != 0 ||
                            len != cmd->sgv[cmd->current_sg].length) {
                                cmd->act_len += len;

                                cmd->error = -EIO;
                                ub_state_stat(sc, cmd);
                                return;
                        }

                } else {
                        /*
                         * If an error occurs on read, we record it, and
                         * continue to fetch data in order to avoid a bubble.
                         *
                         * As a small shortcut, we stop if we detect that
                         * a CSW got mixed into the data.
                         */
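                        /*
                         * A CSW is 13 bytes; tacked onto the end of
                         * sector-aligned data it produces a transfer whose
                         * length modulo 512 equals US_BULK_CS_WRAP_LEN,
                         * which is what the (len & 0x1FF) test below catches.
                         */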
                        if (urb->status != 0)
                                cmd->error = -EIO;

                        len = urb->actual_length;
                        if (urb->status != 0 ||
                            len != cmd->sgv[cmd->current_sg].length) {
                                if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
                                        goto Bad_End;
                        }
                }

                cmd->act_len += urb->actual_length;

                if (++cmd->current_sg < cmd->nsg) {
                        ub_data_start(sc, cmd);
                        return;
                }
                ub_state_stat(sc, cmd);

        } else if (cmd->state == UB_CMDST_STAT) {
                if (urb->status == -EPIPE) {
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                ub_state_done(sc, cmd, rc);
                                return;
                        }

                        /*
                         * Having a stall when getting CSW is an error, so
                         * make sure upper levels are not oblivious to it.
                         */
                        cmd->error = -EIO;              /* A cheap trick... */

                        cmd->state = UB_CMDST_CLRRS;
                        return;
                }

                /* Catch everything, including -EOVERFLOW and other nasties. */
                if (urb->status != 0)
                        goto Bad_End;

                if (urb->actual_length == 0) {
                        ub_state_stat_counted(sc, cmd);
                        return;
                }

                /*
                 * Check the returned Bulk protocol status.
                 * The status block has to be validated first.
                 */

                bcs = &sc->work_bcs;

                if (sc->signature == cpu_to_le32(0)) {
                        /*
                         * This is the first reply, so do not perform the check.
                         * Instead, remember the signature the device uses
                         * for future checks. But do not accept a null signature.
                         */
                        sc->signature = bcs->Signature;
                        if (sc->signature == cpu_to_le32(0)) {
                                ub_state_stat_counted(sc, cmd);
                                return;
                        }
                } else {
                        if (bcs->Signature != sc->signature) {
                                ub_state_stat_counted(sc, cmd);
                                return;
                        }
                }

                if (bcs->Tag != cmd->tag) {
                        /*
                         * This usually happens when we disagree with the
                         * device's microcode about something. For instance,
                         * a few of them throw this after timeouts. They buffer
                         * commands and reply to commands we timed out before.
                         * Without flushing these replies we loop forever.
                         */
                        ub_state_stat_counted(sc, cmd);
                        return;
                }

                if (!sc->bad_resid) {
                        len = le32_to_cpu(bcs->Residue);
                        if (len != cmd->len - cmd->act_len) {
                                /*
                                 * Only start ignoring if this cmd ended well.
                                 */
                                if (cmd->len == cmd->act_len) {
                                        printk(KERN_NOTICE "%s: "
                                            "bad residual %d of %d, ignoring\n",
                                            sc->name, len, cmd->len);
                                        sc->bad_resid = 1;
                                }
                        }
                }

                switch (bcs->Status) {
                case US_BULK_STAT_OK:
                        break;
                case US_BULK_STAT_FAIL:
                        ub_state_sense(sc, cmd);
                        return;
                case US_BULK_STAT_PHASE:
                        goto Bad_End;
                default:
                        printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
                            sc->name, bcs->Status);
                        ub_state_done(sc, cmd, -EINVAL);
                        return;
                }

                /* Not zeroing error to preserve a babble indicator */
                if (cmd->error != 0) {
                        ub_state_sense(sc, cmd);
                        return;
                }
                cmd->state = UB_CMDST_DONE;
                ub_cmdq_pop(sc);
                (*cmd->done)(sc, cmd);

        } else if (cmd->state == UB_CMDST_SENSE) {
                ub_state_done(sc, cmd, -EIO);

        } else {
                printk(KERN_WARNING "%s: wrong command state %d\n",
                    sc->name, cmd->state);
                ub_state_done(sc, cmd, -EINVAL);
                return;
        }
        return;

Bad_End: /* Little Excel is dead */
        ub_state_done(sc, cmd, -EIO);
}

/*
 * Factorization helper for the command state machine:
 * Initiate a data segment transfer.
 */
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
        int pipe;
        int rc;

        UB_INIT_COMPLETION(sc->work_done);

        if (cmd->dir == UB_DIR_READ)
                pipe = sc->recv_bulk_pipe;
        else
                pipe = sc->send_bulk_pipe;
        sc->last_pipe = pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
            sg->length, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                ub_state_done(sc, cmd, rc);
                return;
        }

        if (cmd->timeo)
                sc->work_timer.expires = jiffies + cmd->timeo;
        else
                sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
        add_timer(&sc->work_timer);

        cmd->state = UB_CMDST_DATA;
}

/*
 * Factorization helper for the command state machine:
 * Finish the command.
 */
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{

        cmd->error = rc;
        cmd->state = UB_CMDST_DONE;
        ub_cmdq_pop(sc);
        (*cmd->done)(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read.
 */
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        int rc;

        UB_INIT_COMPLETION(sc->work_done);

        sc->last_pipe = sc->recv_bulk_pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
            &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                ub_state_done(sc, cmd, rc);
                return -1;
        }

        if (cmd->timeo)
                sc->work_timer.expires = jiffies + cmd->timeo;
        else
                sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
        add_timer(&sc->work_timer);
        return 0;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state.
 */
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (__ub_state_stat(sc, cmd) != 0)
                return;

        cmd->stat_count = 0;
        cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state with counter (along [C] path).
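 * The count is capped at four attempts, which bounds the [C] loop in the
 * diagram at the top of this file.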
 */
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (++cmd->stat_count >= 4) {
                ub_state_sense(sc, cmd);
                return;
        }

        if (__ub_state_stat(sc, cmd) != 0)
                return;

        cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a REQUEST SENSE and go to SENSE state.
 */
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd *scmd;
        struct scatterlist *sg;
        int rc;

        if (cmd->cdb[0] == REQUEST_SENSE) {
                rc = -EPIPE;
                goto error;
        }

        scmd = &sc->top_rqs_cmd;
        memset(scmd, 0, sizeof(struct ub_scsi_cmd));
        scmd->cdb[0] = REQUEST_SENSE;
        scmd->cdb[4] = UB_SENSE_SIZE;
        scmd->cdb_len = 6;
        scmd->dir = UB_DIR_READ;
        scmd->state = UB_CMDST_INIT;
        scmd->nsg = 1;
        sg = &scmd->sgv[0];
        sg_init_table(sg, UB_MAX_REQ_SG);
        sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
                        (unsigned long)sc->top_sense & (PAGE_SIZE-1));
        scmd->len = UB_SENSE_SIZE;
        scmd->lun = cmd->lun;
        scmd->done = ub_top_sense_done;
        scmd->back = cmd;

        scmd->tag = sc->tagcnt++;

        cmd->state = UB_CMDST_SENSE;

        ub_cmdq_insert(sc, scmd);
        return;

error:
        ub_state_done(sc, cmd, rc);
}

/*
 * A helper for the command's state machine:
 * Submit a stall clear.
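 * (This is the standard CLEAR_FEATURE(ENDPOINT_HALT) control request,
 * addressed to the stalled endpoint.)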
1475 */
1476static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1477    int stalled_pipe)
1478{
1479        int endp;
1480        struct usb_ctrlrequest *cr;
1481        int rc;
1482
1483        endp = usb_pipeendpoint(stalled_pipe);
1484        if (usb_pipein (stalled_pipe))
1485                endp |= USB_DIR_IN;
1486
1487        cr = &sc->work_cr;
1488        cr->bRequestType = USB_RECIP_ENDPOINT;
1489        cr->bRequest = USB_REQ_CLEAR_FEATURE;
1490        cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
1491        cr->wIndex = cpu_to_le16(endp);
1492        cr->wLength = cpu_to_le16(0);
1493
1494        UB_INIT_COMPLETION(sc->work_done);
1495
1496        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1497            (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1498
1499        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1500                ub_complete(&sc->work_done);
1501                return rc;
1502        }
1503
1504        sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
1505        add_timer(&sc->work_timer);
1506        return 0;
1507}
1508
1509/* Completion callback for the autosense (REQUEST SENSE) command. */
1511static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1512{
1513        unsigned char *sense = sc->top_sense;
1514        struct ub_scsi_cmd *cmd;
1515
1516        /*
1517         * Find the command which triggered the unit attention or a check,
1518         * save the sense into it, and advance its state machine.
1519         */
1520        if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1521                printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1522                return;
1523        }
1524        if (cmd != scmd->back) {
1525                printk(KERN_WARNING "%s: "
1526                    "sense done for wrong command 0x%x\n",
1527                    sc->name, cmd->tag);
1528                return;
1529        }
1530        if (cmd->state != UB_CMDST_SENSE) {
1531                printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
1532                    sc->name, cmd->state);
1533                return;
1534        }
1535
1536        /*
1537         * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1538         */
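            /*
             * Fixed-format sense data per SPC: the sense key sits in the
             * low nibble of byte 2, the ASC/ASCQ pair in bytes 12 and 13.
             */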
1539        cmd->key = sense[2] & 0x0F;
1540        cmd->asc = sense[12];
1541        cmd->ascq = sense[13];
1542
1543        ub_scsi_urb_compl(sc, cmd);
1544}
1545
1546/*
1547 * Reset management
1548 */
1549
1550static void ub_reset_enter(struct ub_dev *sc, int try)
1551{
1552
1553        if (sc->reset) {
1554                /* This happens often on multi-LUN devices. */
1555                return;
1556        }
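            /*
             * Nonzero marks a reset in flight; ub_reset_task keys off the
             * low bit of this value to choose between a soft Bulk reset
             * and a full device reset.
             */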
1557        sc->reset = try + 1;
1558
1559#if 0 /* Not needed because the disconnect waits for us. */
1560        unsigned long flags;
1561        spin_lock_irqsave(&ub_lock, flags);
1562        sc->openc++;
1563        spin_unlock_irqrestore(&ub_lock, flags);
1564#endif
1565
1566#if 0 /* We let them stop themselves. */
1567        struct ub_lun *lun;
1568        list_for_each_entry(lun, &sc->luns, link) {
1569                blk_stop_queue(lun->disk->queue);
1570        }
1571#endif
1572
1573        schedule_work(&sc->reset_work);
1574}
1575
1576static void ub_reset_task(struct work_struct *work)
1577{
1578        struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
1579        unsigned long flags;
1580        struct ub_lun *lun;
1581        int rc;
1582
1583        if (!sc->reset) {
1584                printk(KERN_WARNING "%s: Running reset unrequested\n",
1585                    sc->name);
1586                return;
1587        }
1588
1589        if (atomic_read(&sc->poison)) {
1590                ;
1591        } else if ((sc->reset & 1) == 0) {
1592                ub_sync_reset(sc);
1593                msleep(700);    /* usb-storage sleeps 6s (!) */
1594                ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1595                ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1596        } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1597                ;
1598        } else {
1599                rc = usb_lock_device_for_reset(sc->dev, sc->intf);
1600                if (rc < 0) {
1601                        printk(KERN_NOTICE
1602                            "%s: usb_lock_device_for_reset failed (%d)\n",
1603                            sc->name, rc);
1604                } else {
1605                        rc = usb_reset_device(sc->dev);
1606                        if (rc < 0) {
1607                                printk(KERN_NOTICE "%s: "
1608                                    "usb_reset_device failed (%d)\n",
1609                                    sc->name, rc);
1610                        }
1611                        usb_unlock_device(sc->dev);
1612                }
1613        }
1614
1615        /*
1616         * In theory, no commands can be running while reset is active,
1617         * so nobody can ask for another reset, and so we do not need any
1618         * queues of resets or anything. We do need a spinlock though,
1619         * to interact with block layer.
1620         */
1621        spin_lock_irqsave(sc->lock, flags);
1622        sc->reset = 0;
1623        tasklet_schedule(&sc->tasklet);
1624        list_for_each_entry(lun, &sc->luns, link) {
1625                blk_start_queue(lun->disk->queue);
1626        }
1627        wake_up(&sc->reset_wait);
1628        spin_unlock_irqrestore(sc->lock, flags);
1629}
1630
1631/*
1632 * XXX Reset brackets are too much hassle to implement, so just stub them
1633 * in order to prevent forced unbinding, which deadlocks solidly when our
1634 * ->disconnect method waits for the reset to complete, killing keventd.
1635 *
1636 * XXX Tell Alan to move usb_unlock_device inside usb_reset_device,
1637 * or else post_reset is invoked and restarts I/O on a locked device.
1638 */
1639static int ub_pre_reset(struct usb_interface *iface) {
1640        return 0;
1641}
1642
1643static int ub_post_reset(struct usb_interface *iface) {
1644        return 0;
1645}
1646
1647/*
1648 * This is called from a process context.
1649 */
1650static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1651{
1652
1653        lun->readonly = 0;      /* XXX Query this from the device */
1654
1655        lun->capacity.nsec = 0;
1656        lun->capacity.bsize = 512;
1657        lun->capacity.bshift = 0;
1658
1659        if (ub_sync_tur(sc, lun) != 0)
1660                return;                 /* Not ready */
1661        lun->changed = 0;
1662
1663        if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1664                /*
1665                 * The retry here means something is wrong, either with the
1666                 * device, with the transport, or with our code.
1667                 * We keep this because sd.c has retries for capacity.
1668                 */
1669                if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1670                        lun->capacity.nsec = 0;
1671                        lun->capacity.bsize = 512;
1672                        lun->capacity.bshift = 0;
1673                }
1674        }
1675}
1676
1677/*
1678 * The open function.
1679 * This is mostly needed to keep refcounting, but also to support
1680 * media checks on removable media drives.
1681 */
1682static int ub_bd_open(struct block_device *bdev, fmode_t mode)
1683{
1684        struct ub_lun *lun = bdev->bd_disk->private_data;
1685        struct ub_dev *sc = lun->udev;
1686        unsigned long flags;
1687        int rc;
1688
1689        spin_lock_irqsave(&ub_lock, flags);
1690        if (atomic_read(&sc->poison)) {
1691                spin_unlock_irqrestore(&ub_lock, flags);
1692                return -ENXIO;
1693        }
1694        sc->openc++;
1695        spin_unlock_irqrestore(&ub_lock, flags);
1696
1697        if (lun->removable || lun->readonly)
1698                check_disk_change(bdev);
1699
1700        /*
1701         * sd.c considers ->media_present and ->changed not equivalent,
1702         * under some pretty murky conditions (a failure of READ CAPACITY).
1703         * We may need it one day.
1704         */
1705        if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
1706                rc = -ENOMEDIUM;
1707                goto err_open;
1708        }
1709
1710        if (lun->readonly && (mode & FMODE_WRITE)) {
1711                rc = -EROFS;
1712                goto err_open;
1713        }
1714
1715        return 0;
1716
1717err_open:
1718        ub_put(sc);
1719        return rc;
1720}
1721
1722/* The release function: drop the reference taken in ub_bd_open. */
1724static int ub_bd_release(struct gendisk *disk, fmode_t mode)
1725{
1726        struct ub_lun *lun = disk->private_data;
1727        struct ub_dev *sc = lun->udev;
1728
1729        ub_put(sc);
1730        return 0;
1731}
1732
1733/*
1734 * The ioctl interface.
1735 */
1736static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
1737    unsigned int cmd, unsigned long arg)
1738{
1739        struct gendisk *disk = bdev->bd_disk;
1740        void __user *usermem = (void __user *) arg;
1741
1742        return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
1743}
1744
1745/*
1746 * This is called by check_disk_change if we reported a media change.
1747 * The main objective here is to discover the features of the media such as
1748 * the capacity, read-only status, etc. USB storage generally does not
1749 * need to be spun up, but if we needed it, this would be the place.
1750 *
1751 * This call can sleep.
1752 *
1753 * The return code is not used.
1754 */
1755static int ub_bd_revalidate(struct gendisk *disk)
1756{
1757        struct ub_lun *lun = disk->private_data;
1758
1759        ub_revalidate(lun->udev, lun);
1760
1761        /* XXX Support sector size switching like in sr.c */
1762        blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
1763        set_capacity(disk, lun->capacity.nsec);
1764        // set_disk_ro(sdkp->disk, lun->readonly);
1765
1766        return 0;
1767}
1768
1769/*
1770 * The check is called by the block layer to verify if the media
1771 * is still available. It is supposed to be harmless, lightweight and
1772 * non-intrusive when the media has not changed.
1773 *
1774 * This call can sleep.
1775 *
1776 * The return code is bool!
1777 */
1778static int ub_bd_media_changed(struct gendisk *disk)
1779{
1780        struct ub_lun *lun = disk->private_data;
1781
1782        if (!lun->removable)
1783                return 0;
1784
1785        /*
1786         * We always clear check conditions after every command, so this is
1787         * not as dangerous as it looks. If TEST_UNIT_READY fails here, the
1788         * device really is not ready, and operator or software intervention
1789         * is required. One dangerous case is a drive that spins itself down:
1790         * when the time comes to write dirty pages, the write fails and the
1791         * block layer discards the data. Since we never spin drives up, such
1792         * devices simply cannot be used with ub anyway.
1793         */
1794        if (ub_sync_tur(lun->udev, lun) != 0) {
1795                lun->changed = 1;
1796                return 1;
1797        }
1798
1799        return lun->changed;
1800}
1801
1802static struct block_device_operations ub_bd_fops = {
1803        .owner          = THIS_MODULE,
1804        .open           = ub_bd_open,
1805        .release        = ub_bd_release,
1806        .locked_ioctl   = ub_bd_ioctl,
1807        .media_changed  = ub_bd_media_changed,
1808        .revalidate_disk = ub_bd_revalidate,
1809};
1810
1811/*
1812 * Common ->done routine for commands executed synchronously.
1813 */
1814static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1815{
1816        struct completion *cop = cmd->back;
1817        complete(cop);
1818}
1819
1820/*
1821 * Test if the device has a check condition on it, synchronously.
1822 */
1823static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1824{
1825        struct ub_scsi_cmd *cmd;
1826        enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1827        unsigned long flags;
1828        struct completion compl;
1829        int rc;
1830
1831        init_completion(&compl);
1832
1833        rc = -ENOMEM;
1834        if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1835                goto err_alloc;
1836
1837        cmd->cdb[0] = TEST_UNIT_READY;
1838        cmd->cdb_len = 6;
1839        cmd->dir = UB_DIR_NONE;
1840        cmd->state = UB_CMDST_INIT;
1841        cmd->lun = lun;                 /* This may be NULL, but that's ok */
1842        cmd->done = ub_probe_done;
1843        cmd->back = &compl;
1844
1845        spin_lock_irqsave(sc->lock, flags);
1846        cmd->tag = sc->tagcnt++;
1847
1848        rc = ub_submit_scsi(sc, cmd);
1849        spin_unlock_irqrestore(sc->lock, flags);
1850
1851        if (rc != 0)
1852                goto err_submit;
1853
1854        wait_for_completion(&compl);
1855
1856        rc = cmd->error;
1857
1858        if (rc == -EIO && cmd->key != 0)        /* Retries for benh's key */
1859                rc = cmd->key;
1860
1861err_submit:
1862        kfree(cmd);
1863err_alloc:
1864        return rc;
1865}
1866
1867/*
1868 * Read the SCSI capacity synchronously (for probing).
1869 */
1870static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
1871    struct ub_capacity *ret)
1872{
1873        struct ub_scsi_cmd *cmd;
1874        struct scatterlist *sg;
1875        char *p;
1876        enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
1877        unsigned long flags;
1878        unsigned int bsize, shift;
1879        unsigned long nsec;
1880        struct completion compl;
1881        int rc;
1882
1883        init_completion(&compl);
1884
1885        rc = -ENOMEM;
1886        if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1887                goto err_alloc;
1888        p = (char *)cmd + sizeof(struct ub_scsi_cmd);
1889
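            /*
             * READ CAPACITY(10), opcode 0x25: the reply is eight bytes,
             * the last LBA followed by the block size, both big-endian
             * 32-bit values.
             */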
1890        cmd->cdb[0] = 0x25;
1891        cmd->cdb_len = 10;
1892        cmd->dir = UB_DIR_READ;
1893        cmd->state = UB_CMDST_INIT;
1894        cmd->nsg = 1;
1895        sg = &cmd->sgv[0];
1896        sg_init_table(sg, UB_MAX_REQ_SG);
1897        sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
1898        cmd->len = 8;
1899        cmd->lun = lun;
1900        cmd->done = ub_probe_done;
1901        cmd->back = &compl;
1902
1903        spin_lock_irqsave(sc->lock, flags);
1904        cmd->tag = sc->tagcnt++;
1905
1906        rc = ub_submit_scsi(sc, cmd);
1907        spin_unlock_irqrestore(sc->lock, flags);
1908
1909        if (rc != 0)
1910                goto err_submit;
1911
1912        wait_for_completion(&compl);
1913
1914        if (cmd->error != 0) {
1915                rc = -EIO;
1916                goto err_read;
1917        }
1918        if (cmd->act_len != 8) {
1919                rc = -EIO;
1920                goto err_read;
1921        }
1922
1923        /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
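            /*
             * The device reports the last LBA, hence the +1. nsec is then
             * shifted by bshift so that ->nsec is always counted in
             * 512-byte sectors regardless of the native block size.
             */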
1924        nsec = be32_to_cpu(*(__be32 *)p) + 1;
1925        bsize = be32_to_cpu(*(__be32 *)(p + 4));
1926        switch (bsize) {
1927        case 512:       shift = 0;      break;
1928        case 1024:      shift = 1;      break;
1929        case 2048:      shift = 2;      break;
1930        case 4096:      shift = 3;      break;
1931        default:
1932                rc = -EDOM;
1933                goto err_inv_bsize;
1934        }
1935
1936        ret->bsize = bsize;
1937        ret->bshift = shift;
1938        ret->nsec = nsec << shift;
1939        rc = 0;
1940
1941err_inv_bsize:
1942err_read:
1943err_submit:
1944        kfree(cmd);
1945err_alloc:
1946        return rc;
1947}
1948
1949/* Completion and timeout callbacks for synchronous probe-time URBs. */
1951static void ub_probe_urb_complete(struct urb *urb)
1952{
1953        struct completion *cop = urb->context;
1954        complete(cop);
1955}
1956
1957static void ub_probe_timeout(unsigned long arg)
1958{
1959        struct completion *cop = (struct completion *) arg;
1960        complete(cop);
1961}
1962
1963/*
1964 * Reset with a Bulk reset.
1965 */
1966static int ub_sync_reset(struct ub_dev *sc)
1967{
1968        int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1969        struct usb_ctrlrequest *cr;
1970        struct completion compl;
1971        struct timer_list timer;
1972        int rc;
1973
1974        init_completion(&compl);
1975
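            /*
             * The Bulk-Only Mass Storage Reset is a class-specific request
             * (bRequest 0xFF) addressed to the interface, with no data
             * stage.
             */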
1976        cr = &sc->work_cr;
1977        cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1978        cr->bRequest = US_BULK_RESET_REQUEST;
1979        cr->wValue = cpu_to_le16(0);
1980        cr->wIndex = cpu_to_le16(ifnum);
1981        cr->wLength = cpu_to_le16(0);
1982
1983        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1984            (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1985
1986        if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
1987                printk(KERN_WARNING
1988                     "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
1989                return rc;
1990        }
1991
1992        init_timer(&timer);
1993        timer.function = ub_probe_timeout;
1994        timer.data = (unsigned long) &compl;
1995        timer.expires = jiffies + UB_CTRL_TIMEOUT;
1996        add_timer(&timer);
1997
1998        wait_for_completion(&compl);
1999
2000        del_timer_sync(&timer);
2001        usb_kill_urb(&sc->work_urb);
2002
2003        return sc->work_urb.status;
2004}
2005
2006/*
2007 * Get the number of LUNs by way of the Bulk GetMaxLUN command.
2008 */
2009static int ub_sync_getmaxlun(struct ub_dev *sc)
2010{
2011        int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
2012        unsigned char *p;
2013        enum { ALLOC_SIZE = 1 };
2014        struct usb_ctrlrequest *cr;
2015        struct completion compl;
2016        struct timer_list timer;
2017        int nluns;
2018        int rc;
2019
2020        init_completion(&compl);
2021
2022        rc = -ENOMEM;
2023        if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
2024                goto err_alloc;
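            /* Preload a sentinel so a short or bogus reply is caught below. */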
2025        *p = 55;
2026
2027        cr = &sc->work_cr;
2028        cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2029        cr->bRequest = US_BULK_GET_MAX_LUN;
2030        cr->wValue = cpu_to_le16(0);
2031        cr->wIndex = cpu_to_le16(ifnum);
2032        cr->wLength = cpu_to_le16(1);
2033
2034        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2035            (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2036
2037        if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
2038                goto err_submit;
2039
2040        init_timer(&timer);
2041        timer.function = ub_probe_timeout;
2042        timer.data = (unsigned long) &compl;
2043        timer.expires = jiffies + UB_CTRL_TIMEOUT;
2044        add_timer(&timer);
2045
2046        wait_for_completion(&compl);
2047
2048        del_timer_sync(&timer);
2049        usb_kill_urb(&sc->work_urb);
2050
2051        if ((rc = sc->work_urb.status) < 0)
2052                goto err_io;
2053
2054        if (sc->work_urb.actual_length != 1) {
2055                nluns = 0;
2056        } else {
2057                if ((nluns = *p) == 55) {
2058                        nluns = 0;
2059                } else {
2060                        /* GetMaxLUN returns the maximum LUN number */
2061                        nluns += 1;
2062                        if (nluns > UB_MAX_LUNS)
2063                                nluns = UB_MAX_LUNS;
2064                }
2065        }
2066
2067        kfree(p);
2068        return nluns;
2069
2070err_io:
2071err_submit:
2072        kfree(p);
2073err_alloc:
2074        return rc;
2075}
2076
2077/*
2078 * Clear initial stalls.
2079 */
2080static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2081{
2082        int endp;
2083        struct usb_ctrlrequest *cr;
2084        struct completion compl;
2085        struct timer_list timer;
2086        int rc;
2087
2088        init_completion(&compl);
2089
2090        endp = usb_pipeendpoint(stalled_pipe);
2091        if (usb_pipein(stalled_pipe))
2092                endp |= USB_DIR_IN;
2093
2094        cr = &sc->work_cr;
2095        cr->bRequestType = USB_RECIP_ENDPOINT;
2096        cr->bRequest = USB_REQ_CLEAR_FEATURE;
2097        cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2098        cr->wIndex = cpu_to_le16(endp);
2099        cr->wLength = cpu_to_le16(0);
2100
2101        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2102            (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2103
2104        if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2105                printk(KERN_WARNING
2106                     "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2107                return rc;
2108        }
2109
2110        init_timer(&timer);
2111        timer.function = ub_probe_timeout;
2112        timer.data = (unsigned long) &compl;
2113        timer.expires = jiffies + UB_CTRL_TIMEOUT;
2114        add_timer(&timer);
2115
2116        wait_for_completion(&compl);
2117
2118        del_timer_sync(&timer);
2119        usb_kill_urb(&sc->work_urb);
2120
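            /* Also reset the host-side endpoint state, notably the data toggle. */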
2121        usb_reset_endpoint(sc->dev, endp);
2122
2123        return 0;
2124}
2125
2126/*
2127 * Get the pipe settings.
2128 */
2129static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2130    struct usb_interface *intf)
2131{
2132        struct usb_host_interface *altsetting = intf->cur_altsetting;
2133        struct usb_endpoint_descriptor *ep_in = NULL;
2134        struct usb_endpoint_descriptor *ep_out = NULL;
2135        struct usb_endpoint_descriptor *ep;
2136        int i;
2137
2138        /*
2139         * Find the endpoints we need.
2140         * We are expecting a minimum of 2 endpoints - in and out (bulk).
2141         * We will ignore any others.
2142         */
2143        for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2144                ep = &altsetting->endpoint[i].desc;
2145
2146                /* Is it a BULK endpoint? */
2147                if (usb_endpoint_xfer_bulk(ep)) {
2148                        /* BULK in or out? */
2149                        if (usb_endpoint_dir_in(ep)) {
2150                                if (ep_in == NULL)
2151                                        ep_in = ep;
2152                        } else {
2153                                if (ep_out == NULL)
2154                                        ep_out = ep;
2155                        }
2156                }
2157        }
2158
2159        if (ep_in == NULL || ep_out == NULL) {
2160                printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
2161                return -ENODEV;
2162        }
2163
2164        /* Calculate and store the pipe values */
2165        sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2166        sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2167        sc->send_bulk_pipe = usb_sndbulkpipe(dev, usb_endpoint_num(ep_out));
2168        sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, usb_endpoint_num(ep_in));
2171
2172        return 0;
2173}
2174
2175/*
2176 * Probing is done in the process context, which allows us to cheat
2177 * and not build a state machine for the discovery.
2178 */
2179static int ub_probe(struct usb_interface *intf,
2180    const struct usb_device_id *dev_id)
2181{
2182        struct ub_dev *sc;
2183        int nluns;
2184        int rc;
2185        int i;
2186
2187        if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2188                return -ENXIO;
2189
2190        rc = -ENOMEM;
2191        if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2192                goto err_core;
2193        sc->lock = ub_next_lock();
2194        INIT_LIST_HEAD(&sc->luns);
2195        usb_init_urb(&sc->work_urb);
2196        tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2197        atomic_set(&sc->poison, 0);
2198        INIT_WORK(&sc->reset_work, ub_reset_task);
2199        init_waitqueue_head(&sc->reset_wait);
2200
2201        init_timer(&sc->work_timer);
2202        sc->work_timer.data = (unsigned long) sc;
2203        sc->work_timer.function = ub_urb_timeout;
2204
2205        ub_init_completion(&sc->work_done);
2206        sc->work_done.done = 1;         /* A little yuk, but oh well... */
2207
2208        sc->dev = interface_to_usbdev(intf);
2209        sc->intf = intf;
2210        // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2211        usb_set_intfdata(intf, sc);
2212        usb_get_dev(sc->dev);
2213        /*
2214         * Since we give the interface struct to the block level through
2215         * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
2216         * oopses on close after a disconnect (kernels 2.6.16 and up).
2217         */
2218        usb_get_intf(sc->intf);
2219
2220        snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2221            sc->dev->bus->busnum, sc->dev->devnum);
2222
2223        /* XXX Verify that we can handle the device (from descriptors) */
2224
2225        if (ub_get_pipes(sc, sc->dev, intf) != 0)
2226                goto err_dev_desc;
2227
2228        /*
2229         * At this point, all USB initialization is done, do upper layer.
2230         * We really hate halfway initialized structures, so from the
2231         * invariants perspective, this ub_dev is fully constructed at
2232         * this point.
2233         */
2234
2235        /*
2236         * This is needed to clear toggles. It is a problem only if we do
2237         * `rmmod ub && modprobe ub` without disconnects, but we like that.
2238         */
2239#if 0 /* iPod Mini fails if we do this (big white iPod works) */
2240        ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2241        ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2242#endif
2243
2244        /*
2245         * The way this is used by the startup code is a little specific.
2246         * A SCSI check causes a USB stall. Our common case code sees it
2247         * and clears the check, after which the device is ready for use.
2248         * But if a check was not present, any command other than
2249         * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2250         *
2251         * If we neglect to clear the SCSI check, the first real command fails
2252         * (which is the capacity readout). We would clear that and retry,
2253         * but why cause spurious retries for no reason.
2254         *
2255         * Revalidation may start with its own TEST_UNIT_READY, but that one
2256         * has to succeed, so we clear checks with an additional one here.
2257         * In any case it is not our business how revalidation is implemented.
2258         */
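            /*
             * A return of 0x6 is the UNIT ATTENTION sense key: retry until
             * the attention condition has been consumed.
             */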
2259        for (i = 0; i < 3; i++) {  /* Retries for the schwag key from KS'04 */
2260                if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2261                if (rc != 0x6) break;
2262                msleep(10);
2263        }
2264
2265        nluns = 1;
2266        for (i = 0; i < 3; i++) {
2267                if ((rc = ub_sync_getmaxlun(sc)) < 0)
2268                        break;
2269                if (rc != 0) {
2270                        nluns = rc;
2271                        break;
2272                }
2273                msleep(100);
2274        }
2275
2276        for (i = 0; i < nluns; i++) {
2277                ub_probe_lun(sc, i);
2278        }
2279        return 0;
2280
2281err_dev_desc:
2282        usb_set_intfdata(intf, NULL);
2283        usb_put_intf(sc->intf);
2284        usb_put_dev(sc->dev);
2285        kfree(sc);
2286err_core:
2287        return rc;
2288}
2289
2290static int ub_probe_lun(struct ub_dev *sc, int lnum)
2291{
2292        struct ub_lun *lun;
2293        struct request_queue *q;
2294        struct gendisk *disk;
2295        int rc;
2296
2297        rc = -ENOMEM;
2298        if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2299                goto err_alloc;
2300        lun->num = lnum;
2301
2302        rc = -ENOSR;
2303        if ((lun->id = ub_id_get()) == -1)
2304                goto err_id;
2305
2306        lun->udev = sc;
2307
2308        snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2309            lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2310
2311        lun->removable = 1;             /* XXX Query this from the device */
2312        lun->changed = 1;               /* ub_revalidate clears only */
2313        ub_revalidate(sc, lun);
2314
2315        rc = -ENOMEM;
2316        if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2317                goto err_diskalloc;
2318
2319        sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2320        disk->major = UB_MAJOR;
2321        disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2322        disk->fops = &ub_bd_fops;
2323        disk->private_data = lun;
2324        disk->driverfs_dev = &sc->intf->dev;
2325
2326        rc = -ENOMEM;
2327        if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2328                goto err_blkqinit;
2329
2330        disk->queue = q;
2331
2332        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2333        blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
2334        blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2335        blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
2336        blk_queue_max_sectors(q, UB_MAX_SECTORS);
2337        blk_queue_hardsect_size(q, lun->capacity.bsize);
2338
2339        lun->disk = disk;
2340        q->queuedata = lun;
2341        list_add(&lun->link, &sc->luns);
2342
2343        set_capacity(disk, lun->capacity.nsec);
2344        if (lun->removable)
2345                disk->flags |= GENHD_FL_REMOVABLE;
2346
2347        add_disk(disk);
2348
2349        return 0;
2350
2351err_blkqinit:
2352        put_disk(disk);
2353err_diskalloc:
2354        ub_id_put(lun->id);
2355err_id:
2356        kfree(lun);
2357err_alloc:
2358        return rc;
2359}
2360
2361static void ub_disconnect(struct usb_interface *intf)
2362{
2363        struct ub_dev *sc = usb_get_intfdata(intf);
2364        struct ub_lun *lun;
2365        unsigned long flags;
2366
2367        /*
2368         * Prevent ub_bd_release from pulling the rug from under us.
2369         * XXX This is starting to look like a kref.
2370         * XXX Why not to take this ref at probe time?
2371         */
2372        spin_lock_irqsave(&ub_lock, flags);
2373        sc->openc++;
2374        spin_unlock_irqrestore(&ub_lock, flags);
2375
2376        /*
2377         * Fence stall clearings, operations triggered by unlinkings and so on.
2378         * We do not attempt to unlink any URBs, because we do not trust the
2379         * unlink paths in HC drivers. Also, we get -84 (EILSEQ) upon disconnect anyway.
2380         */
2381        atomic_set(&sc->poison, 1);
2382
2383        /*
2384         * Wait for reset to end, if any.
2385         */
2386        wait_event(sc->reset_wait, !sc->reset);
2387
2388        /*
2389         * Blow away queued commands.
2390         *
2391         * Actually, this never works, because before we get here
2392         * the HCD terminates outstanding URB(s). It causes our
2393         * SCSI command queue to advance, commands fail to submit,
2394         * and the whole queue drains. So, we just use this code to
2395         * print warnings.
2396         */
2397        spin_lock_irqsave(sc->lock, flags);
2398        {
2399                struct ub_scsi_cmd *cmd;
2400                int cnt = 0;
2401                while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2402                        cmd->error = -ENOTCONN;
2403                        cmd->state = UB_CMDST_DONE;
2404                        ub_cmdq_pop(sc);
2405                        (*cmd->done)(sc, cmd);
2406                        cnt++;
2407                }
2408                if (cnt != 0) {
2409                        printk(KERN_WARNING "%s: "
2410                            "%d commands were queued after shutdown\n", sc->name, cnt);
2411                }
2412        }
2413        spin_unlock_irqrestore(sc->lock, flags);
2414
2415        /*
2416         * Unregister the upper layer.
2417         */
2418        list_for_each_entry(lun, &sc->luns, link) {
2419                del_gendisk(lun->disk);
2420                /*
2421                 * I wish I could do:
2422                 *    queue_flag_set(QUEUE_FLAG_DEAD, q);
2423                 * As it is, we rely on our internal poisoning and let
2424                 * the upper levels spin furiously, failing all the I/O.
2425                 */
2426        }
2427
2428        /*
2429         * Testing for -EINPROGRESS is always a bug, so we are bending
2430         * the rules a little.
2431         */
2432        spin_lock_irqsave(sc->lock, flags);
2433        if (sc->work_urb.status == -EINPROGRESS) {      /* janitors: ignore */
2434                printk(KERN_WARNING "%s: "
2435                    "URB is active after disconnect\n", sc->name);
2436        }
2437        spin_unlock_irqrestore(sc->lock, flags);
2438
2439        /*
2440         * There is virtually no chance that another CPU still runs the timeout
2441         * this long after ub_urb_complete should have called del_timer,
2442         * provided the HCD did not forget to deliver the callback on unlink.
2443         */
2444        del_timer_sync(&sc->work_timer);
2445
2446        /*
2447         * At this point there must be no commands coming from anyone
2448         * and no URBs left in transit.
2449         */
2450
2451        ub_put(sc);
2452}
2453
2454static struct usb_driver ub_driver = {
2455        .name =         "ub",
2456        .probe =        ub_probe,
2457        .disconnect =   ub_disconnect,
2458        .id_table =     ub_usb_ids,
2459        .pre_reset =    ub_pre_reset,
2460        .post_reset =   ub_post_reset,
2461};
2462
2463static int __init ub_init(void)
2464{
2465        int rc;
2466        int i;
2467
2468        for (i = 0; i < UB_QLOCK_NUM; i++)
2469                spin_lock_init(&ub_qlockv[i]);
2470
2471        if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2472                goto err_regblkdev;
2473
2474        if ((rc = usb_register(&ub_driver)) != 0)
2475                goto err_register;
2476
2477        usb_usual_set_present(USB_US_TYPE_UB);
2478        return 0;
2479
2480err_register:
2481        unregister_blkdev(UB_MAJOR, DRV_NAME);
2482err_regblkdev:
2483        return rc;
2484}
2485
2486static void __exit ub_exit(void)
2487{
2488        usb_deregister(&ub_driver);
2489
2490        unregister_blkdev(UB_MAJOR, DRV_NAME);
2491        usb_usual_clear_present(USB_US_TYPE_UB);
2492}
2493
2494module_init(ub_init);
2495module_exit(ub_exit);
2496
2497MODULE_LICENSE("GPL");
2498