linux/drivers/scsi/virtio_scsi.c
<<
>>
Prefs
   1/*
   2 * Virtio SCSI HBA driver
   3 *
   4 * Copyright IBM Corp. 2010
   5 * Copyright Red Hat, Inc. 2011
   6 *
   7 * Authors:
   8 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
   9 *  Paolo Bonzini   <pbonzini@redhat.com>
  10 *
  11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  12 * See the COPYING file in the top-level directory.
  13 *
  14 */
  15
  16#include <linux/module.h>
  17#include <linux/slab.h>
  18#include <linux/mempool.h>
  19#include <linux/virtio.h>
  20#include <linux/virtio_ids.h>
  21#include <linux/virtio_config.h>
  22#include <linux/virtio_scsi.h>
  23#include <scsi/scsi_host.h>
  24#include <scsi/scsi_device.h>
  25#include <scsi/scsi_cmnd.h>
  26
  27#define VIRTIO_SCSI_MEMPOOL_SZ 64
  28#define VIRTIO_SCSI_EVENT_LEN 8
  29
  30/* Command queue element */
  31struct virtio_scsi_cmd {
  32        struct scsi_cmnd *sc;
  33        struct completion *comp;
  34        union {
  35                struct virtio_scsi_cmd_req       cmd;
  36                struct virtio_scsi_ctrl_tmf_req  tmf;
  37                struct virtio_scsi_ctrl_an_req   an;
  38        } req;
  39        union {
  40                struct virtio_scsi_cmd_resp      cmd;
  41                struct virtio_scsi_ctrl_tmf_resp tmf;
  42                struct virtio_scsi_ctrl_an_resp  an;
  43                struct virtio_scsi_event         evt;
  44        } resp;
  45} ____cacheline_aligned_in_smp;
  46
  47struct virtio_scsi_event_node {
  48        struct virtio_scsi *vscsi;
  49        struct virtio_scsi_event event;
  50        struct work_struct work;
  51};
  52
  53struct virtio_scsi_vq {
  54        /* Protects vq */
  55        spinlock_t vq_lock;
  56
  57        struct virtqueue *vq;
  58};
  59
  60/* Per-target queue state */
  61struct virtio_scsi_target_state {
  62        /* Protects sg.  Lock hierarchy is tgt_lock -> vq_lock.  */
  63        spinlock_t tgt_lock;
  64
  65        /* For sglist construction when adding commands to the virtqueue.  */
  66        struct scatterlist sg[];
  67};
  68
  69/* Driver instance state */
  70struct virtio_scsi {
  71        struct virtio_device *vdev;
  72
  73        struct virtio_scsi_vq ctrl_vq;
  74        struct virtio_scsi_vq event_vq;
  75        struct virtio_scsi_vq req_vq;
  76
  77        /* Get some buffers ready for event vq */
  78        struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
  79
  80        struct virtio_scsi_target_state *tgt[];
  81};
  82
  83static struct kmem_cache *virtscsi_cmd_cache;
  84static mempool_t *virtscsi_cmd_pool;
  85
  86static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
  87{
  88        return vdev->priv;
  89}
  90
  91static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
  92{
  93        if (!resid)
  94                return;
  95
  96        if (!scsi_bidi_cmnd(sc)) {
  97                scsi_set_resid(sc, resid);
  98                return;
  99        }
 100
 101        scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
 102        scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
 103}
 104
 105/**
 106 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 107 *
 108 * Called with vq_lock held.
 109 */
 110static void virtscsi_complete_cmd(void *buf)
 111{
 112        struct virtio_scsi_cmd *cmd = buf;
 113        struct scsi_cmnd *sc = cmd->sc;
 114        struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
 115
 116        dev_dbg(&sc->device->sdev_gendev,
 117                "cmd %p response %u status %#02x sense_len %u\n",
 118                sc, resp->response, resp->status, resp->sense_len);
 119
 120        sc->result = resp->status;
 121        virtscsi_compute_resid(sc, resp->resid);
 122        switch (resp->response) {
 123        case VIRTIO_SCSI_S_OK:
 124                set_host_byte(sc, DID_OK);
 125                break;
 126        case VIRTIO_SCSI_S_OVERRUN:
 127                set_host_byte(sc, DID_ERROR);
 128                break;
 129        case VIRTIO_SCSI_S_ABORTED:
 130                set_host_byte(sc, DID_ABORT);
 131                break;
 132        case VIRTIO_SCSI_S_BAD_TARGET:
 133                set_host_byte(sc, DID_BAD_TARGET);
 134                break;
 135        case VIRTIO_SCSI_S_RESET:
 136                set_host_byte(sc, DID_RESET);
 137                break;
 138        case VIRTIO_SCSI_S_BUSY:
 139                set_host_byte(sc, DID_BUS_BUSY);
 140                break;
 141        case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
 142                set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
 143                break;
 144        case VIRTIO_SCSI_S_TARGET_FAILURE:
 145                set_host_byte(sc, DID_TARGET_FAILURE);
 146                break;
 147        case VIRTIO_SCSI_S_NEXUS_FAILURE:
 148                set_host_byte(sc, DID_NEXUS_FAILURE);
 149                break;
 150        default:
 151                scmd_printk(KERN_WARNING, sc, "Unknown response %d",
 152                            resp->response);
 153                /* fall through */
 154        case VIRTIO_SCSI_S_FAILURE:
 155                set_host_byte(sc, DID_ERROR);
 156                break;
 157        }
 158
 159        WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
 160        if (sc->sense_buffer) {
 161                memcpy(sc->sense_buffer, resp->sense,
 162                       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
 163                if (resp->sense_len)
 164                        set_driver_byte(sc, DRIVER_SENSE);
 165        }
 166
 167        mempool_free(cmd, virtscsi_cmd_pool);
 168        sc->scsi_done(sc);
 169}
 170
 171static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
 172{
 173        void *buf;
 174        unsigned int len;
 175
 176        do {
 177                virtqueue_disable_cb(vq);
 178                while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 179                        fn(buf);
 180        } while (!virtqueue_enable_cb(vq));
 181}
 182
 183static void virtscsi_req_done(struct virtqueue *vq)
 184{
 185        struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
 186        struct virtio_scsi *vscsi = shost_priv(sh);
 187        unsigned long flags;
 188
 189        spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags);
 190        virtscsi_vq_done(vq, virtscsi_complete_cmd);
 191        spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags);
 192};
 193
 194static void virtscsi_complete_free(void *buf)
 195{
 196        struct virtio_scsi_cmd *cmd = buf;
 197
 198        if (cmd->comp)
 199                complete_all(cmd->comp);
 200        else
 201                mempool_free(cmd, virtscsi_cmd_pool);
 202}
 203
 204static void virtscsi_ctrl_done(struct virtqueue *vq)
 205{
 206        struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
 207        struct virtio_scsi *vscsi = shost_priv(sh);
 208        unsigned long flags;
 209
 210        spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags);
 211        virtscsi_vq_done(vq, virtscsi_complete_free);
 212        spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags);
 213};
 214
 215static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 216                               struct virtio_scsi_event_node *event_node)
 217{
 218        int ret;
 219        struct scatterlist sg;
 220        unsigned long flags;
 221
 222        sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
 223
 224        spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
 225
 226        ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
 227        if (ret >= 0)
 228                virtqueue_kick(vscsi->event_vq.vq);
 229
 230        spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
 231
 232        return ret;
 233}
 234
 235static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
 236{
 237        int i;
 238
 239        for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
 240                vscsi->event_list[i].vscsi = vscsi;
 241                virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
 242        }
 243
 244        return 0;
 245}
 246
 247static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
 248{
 249        int i;
 250
 251        for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
 252                cancel_work_sync(&vscsi->event_list[i].work);
 253}
 254
 255static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
 256                                                struct virtio_scsi_event *event)
 257{
 258        struct scsi_device *sdev;
 259        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
 260        unsigned int target = event->lun[1];
 261        unsigned int lun = (event->lun[2] << 8) | event->lun[3];
 262
 263        switch (event->reason) {
 264        case VIRTIO_SCSI_EVT_RESET_RESCAN:
 265                scsi_add_device(shost, 0, target, lun);
 266                break;
 267        case VIRTIO_SCSI_EVT_RESET_REMOVED:
 268                sdev = scsi_device_lookup(shost, 0, target, lun);
 269                if (sdev) {
 270                        scsi_remove_device(sdev);
 271                        scsi_device_put(sdev);
 272                } else {
 273                        pr_err("SCSI device %d 0 %d %d not found\n",
 274                                shost->host_no, target, lun);
 275                }
 276                break;
 277        default:
 278                pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
 279        }
 280}
 281
 282static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
 283                                         struct virtio_scsi_event *event)
 284{
 285        struct scsi_device *sdev;
 286        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
 287        unsigned int target = event->lun[1];
 288        unsigned int lun = (event->lun[2] << 8) | event->lun[3];
 289        u8 asc = event->reason & 255;
 290        u8 ascq = event->reason >> 8;
 291
 292        sdev = scsi_device_lookup(shost, 0, target, lun);
 293        if (!sdev) {
 294                pr_err("SCSI device %d 0 %d %d not found\n",
 295                        shost->host_no, target, lun);
 296                return;
 297        }
 298
 299        /* Handle "Parameters changed", "Mode parameters changed", and
 300           "Capacity data has changed".  */
 301        if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
 302                scsi_rescan_device(&sdev->sdev_gendev);
 303
 304        scsi_device_put(sdev);
 305}
 306
 307static void virtscsi_handle_event(struct work_struct *work)
 308{
 309        struct virtio_scsi_event_node *event_node =
 310                container_of(work, struct virtio_scsi_event_node, work);
 311        struct virtio_scsi *vscsi = event_node->vscsi;
 312        struct virtio_scsi_event *event = &event_node->event;
 313
 314        if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
 315                event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
 316                scsi_scan_host(virtio_scsi_host(vscsi->vdev));
 317        }
 318
 319        switch (event->event) {
 320        case VIRTIO_SCSI_T_NO_EVENT:
 321                break;
 322        case VIRTIO_SCSI_T_TRANSPORT_RESET:
 323                virtscsi_handle_transport_reset(vscsi, event);
 324                break;
 325        case VIRTIO_SCSI_T_PARAM_CHANGE:
 326                virtscsi_handle_param_change(vscsi, event);
 327                break;
 328        default:
 329                pr_err("Unsupport virtio scsi event %x\n", event->event);
 330        }
 331        virtscsi_kick_event(vscsi, event_node);
 332}
 333
 334static void virtscsi_complete_event(void *buf)
 335{
 336        struct virtio_scsi_event_node *event_node = buf;
 337
 338        INIT_WORK(&event_node->work, virtscsi_handle_event);
 339        schedule_work(&event_node->work);
 340}
 341
 342static void virtscsi_event_done(struct virtqueue *vq)
 343{
 344        struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
 345        struct virtio_scsi *vscsi = shost_priv(sh);
 346        unsigned long flags;
 347
 348        spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
 349        virtscsi_vq_done(vq, virtscsi_complete_event);
 350        spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
 351};
 352
 353static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
 354                             struct scsi_data_buffer *sdb)
 355{
 356        struct sg_table *table = &sdb->table;
 357        struct scatterlist *sg_elem;
 358        unsigned int idx = *p_idx;
 359        int i;
 360
 361        for_each_sg(table->sgl, sg_elem, table->nents, i)
 362                sg[idx++] = *sg_elem;
 363
 364        *p_idx = idx;
 365}
 366
 367/**
 368 * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
 369 * @vscsi       : virtio_scsi state
 370 * @cmd         : command structure
 371 * @out_num     : number of read-only elements
 372 * @in_num      : number of write-only elements
 373 * @req_size    : size of the request buffer
 374 * @resp_size   : size of the response buffer
 375 *
 376 * Called with tgt_lock held.
 377 */
 378static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt,
 379                             struct virtio_scsi_cmd *cmd,
 380                             unsigned *out_num, unsigned *in_num,
 381                             size_t req_size, size_t resp_size)
 382{
 383        struct scsi_cmnd *sc = cmd->sc;
 384        struct scatterlist *sg = tgt->sg;
 385        unsigned int idx = 0;
 386
 387        /* Request header.  */
 388        sg_set_buf(&sg[idx++], &cmd->req, req_size);
 389
 390        /* Data-out buffer.  */
 391        if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
 392                virtscsi_map_sgl(sg, &idx, scsi_out(sc));
 393
 394        *out_num = idx;
 395
 396        /* Response header.  */
 397        sg_set_buf(&sg[idx++], &cmd->resp, resp_size);
 398
 399        /* Data-in buffer */
 400        if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
 401                virtscsi_map_sgl(sg, &idx, scsi_in(sc));
 402
 403        *in_num = idx - *out_num;
 404}
 405
 406static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
 407                             struct virtio_scsi_vq *vq,
 408                             struct virtio_scsi_cmd *cmd,
 409                             size_t req_size, size_t resp_size, gfp_t gfp)
 410{
 411        unsigned int out_num, in_num;
 412        unsigned long flags;
 413        int ret;
 414
 415        spin_lock_irqsave(&tgt->tgt_lock, flags);
 416        virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
 417
 418        spin_lock(&vq->vq_lock);
 419        ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
 420        spin_unlock(&tgt->tgt_lock);
 421        if (ret >= 0)
 422                ret = virtqueue_kick_prepare(vq->vq);
 423
 424        spin_unlock_irqrestore(&vq->vq_lock, flags);
 425
 426        if (ret > 0)
 427                virtqueue_notify(vq->vq);
 428        return ret;
 429}
 430
 431static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 432{
 433        struct virtio_scsi *vscsi = shost_priv(sh);
 434        struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id];
 435        struct virtio_scsi_cmd *cmd;
 436        int ret;
 437
 438        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
 439        BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
 440
 441        /* TODO: check feature bit and fail if unsupported?  */
 442        BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
 443
 444        dev_dbg(&sc->device->sdev_gendev,
 445                "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
 446
 447        ret = SCSI_MLQUEUE_HOST_BUSY;
 448        cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
 449        if (!cmd)
 450                goto out;
 451
 452        memset(cmd, 0, sizeof(*cmd));
 453        cmd->sc = sc;
 454        cmd->req.cmd = (struct virtio_scsi_cmd_req){
 455                .lun[0] = 1,
 456                .lun[1] = sc->device->id,
 457                .lun[2] = (sc->device->lun >> 8) | 0x40,
 458                .lun[3] = sc->device->lun & 0xff,
 459                .tag = (unsigned long)sc,
 460                .task_attr = VIRTIO_SCSI_S_SIMPLE,
 461                .prio = 0,
 462                .crn = 0,
 463        };
 464
 465        BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
 466        memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
 467
 468        if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
 469                              sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
 470                              GFP_ATOMIC) >= 0)
 471                ret = 0;
 472
 473out:
 474        return ret;
 475}
 476
 477static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
 478{
 479        DECLARE_COMPLETION_ONSTACK(comp);
 480        struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id];
 481        int ret = FAILED;
 482
 483        cmd->comp = &comp;
 484        if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd,
 485                              sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
 486                              GFP_NOIO) < 0)
 487                goto out;
 488
 489        wait_for_completion(&comp);
 490        if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
 491            cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
 492                ret = SUCCESS;
 493
 494out:
 495        mempool_free(cmd, virtscsi_cmd_pool);
 496        return ret;
 497}
 498
 499static int virtscsi_device_reset(struct scsi_cmnd *sc)
 500{
 501        struct virtio_scsi *vscsi = shost_priv(sc->device->host);
 502        struct virtio_scsi_cmd *cmd;
 503
 504        sdev_printk(KERN_INFO, sc->device, "device reset\n");
 505        cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
 506        if (!cmd)
 507                return FAILED;
 508
 509        memset(cmd, 0, sizeof(*cmd));
 510        cmd->sc = sc;
 511        cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
 512                .type = VIRTIO_SCSI_T_TMF,
 513                .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
 514                .lun[0] = 1,
 515                .lun[1] = sc->device->id,
 516                .lun[2] = (sc->device->lun >> 8) | 0x40,
 517                .lun[3] = sc->device->lun & 0xff,
 518        };
 519        return virtscsi_tmf(vscsi, cmd);
 520}
 521
 522static int virtscsi_abort(struct scsi_cmnd *sc)
 523{
 524        struct virtio_scsi *vscsi = shost_priv(sc->device->host);
 525        struct virtio_scsi_cmd *cmd;
 526
 527        scmd_printk(KERN_INFO, sc, "abort\n");
 528        cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
 529        if (!cmd)
 530                return FAILED;
 531
 532        memset(cmd, 0, sizeof(*cmd));
 533        cmd->sc = sc;
 534        cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
 535                .type = VIRTIO_SCSI_T_TMF,
 536                .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
 537                .lun[0] = 1,
 538                .lun[1] = sc->device->id,
 539                .lun[2] = (sc->device->lun >> 8) | 0x40,
 540                .lun[3] = sc->device->lun & 0xff,
 541                .tag = (unsigned long)sc,
 542        };
 543        return virtscsi_tmf(vscsi, cmd);
 544}
 545
 546static struct scsi_host_template virtscsi_host_template = {
 547        .module = THIS_MODULE,
 548        .name = "Virtio SCSI HBA",
 549        .proc_name = "virtio_scsi",
 550        .queuecommand = virtscsi_queuecommand,
 551        .this_id = -1,
 552        .eh_abort_handler = virtscsi_abort,
 553        .eh_device_reset_handler = virtscsi_device_reset,
 554
 555        .can_queue = 1024,
 556        .dma_boundary = UINT_MAX,
 557        .use_clustering = ENABLE_CLUSTERING,
 558};
 559
 560#define virtscsi_config_get(vdev, fld) \
 561        ({ \
 562                typeof(((struct virtio_scsi_config *)0)->fld) __val; \
 563                vdev->config->get(vdev, \
 564                                  offsetof(struct virtio_scsi_config, fld), \
 565                                  &__val, sizeof(__val)); \
 566                __val; \
 567        })
 568
 569#define virtscsi_config_set(vdev, fld, val) \
 570        (void)({ \
 571                typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
 572                vdev->config->set(vdev, \
 573                                  offsetof(struct virtio_scsi_config, fld), \
 574                                  &__val, sizeof(__val)); \
 575        })
 576
 577static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
 578                             struct virtqueue *vq)
 579{
 580        spin_lock_init(&virtscsi_vq->vq_lock);
 581        virtscsi_vq->vq = vq;
 582}
 583
 584static struct virtio_scsi_target_state *virtscsi_alloc_tgt(
 585        struct virtio_device *vdev, int sg_elems)
 586{
 587        struct virtio_scsi_target_state *tgt;
 588        gfp_t gfp_mask = GFP_KERNEL;
 589
 590        /* We need extra sg elements at head and tail.  */
 591        tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2),
 592                      gfp_mask);
 593
 594        if (!tgt)
 595                return NULL;
 596
 597        spin_lock_init(&tgt->tgt_lock);
 598        sg_init_table(tgt->sg, sg_elems + 2);
 599        return tgt;
 600}
 601
 602static void virtscsi_scan(struct virtio_device *vdev)
 603{
 604        struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;
 605
 606        scsi_scan_host(shost);
 607}
 608
 609static void virtscsi_remove_vqs(struct virtio_device *vdev)
 610{
 611        struct Scsi_Host *sh = virtio_scsi_host(vdev);
 612        struct virtio_scsi *vscsi = shost_priv(sh);
 613        u32 i, num_targets;
 614
 615        /* Stop all the virtqueues. */
 616        vdev->config->reset(vdev);
 617
 618        num_targets = sh->max_id;
 619        for (i = 0; i < num_targets; i++) {
 620                kfree(vscsi->tgt[i]);
 621                vscsi->tgt[i] = NULL;
 622        }
 623
 624        vdev->config->del_vqs(vdev);
 625}
 626
 627static int virtscsi_init(struct virtio_device *vdev,
 628                         struct virtio_scsi *vscsi, int num_targets)
 629{
 630        int err;
 631        struct virtqueue *vqs[3];
 632        u32 i, sg_elems;
 633
 634        vq_callback_t *callbacks[] = {
 635                virtscsi_ctrl_done,
 636                virtscsi_event_done,
 637                virtscsi_req_done
 638        };
 639        const char *names[] = {
 640                "control",
 641                "event",
 642                "request"
 643        };
 644
 645        /* Discover virtqueues and write information to configuration.  */
 646        err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
 647        if (err)
 648                return err;
 649
 650        virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
 651        virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
 652        virtscsi_init_vq(&vscsi->req_vq, vqs[2]);
 653
 654        virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
 655        virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
 656
 657        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
 658                virtscsi_kick_event_all(vscsi);
 659
 660        /* We need to know how many segments before we allocate.  */
 661        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
 662
 663        for (i = 0; i < num_targets; i++) {
 664                vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems);
 665                if (!vscsi->tgt[i]) {
 666                        err = -ENOMEM;
 667                        goto out;
 668                }
 669        }
 670        err = 0;
 671
 672out:
 673        if (err)
 674                virtscsi_remove_vqs(vdev);
 675        return err;
 676}
 677
 678static int __devinit virtscsi_probe(struct virtio_device *vdev)
 679{
 680        struct Scsi_Host *shost;
 681        struct virtio_scsi *vscsi;
 682        int err;
 683        u32 sg_elems, num_targets;
 684        u32 cmd_per_lun;
 685
 686        /* Allocate memory and link the structs together.  */
 687        num_targets = virtscsi_config_get(vdev, max_target) + 1;
 688        shost = scsi_host_alloc(&virtscsi_host_template,
 689                sizeof(*vscsi)
 690                + num_targets * sizeof(struct virtio_scsi_target_state));
 691
 692        if (!shost)
 693                return -ENOMEM;
 694
 695        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
 696        shost->sg_tablesize = sg_elems;
 697        vscsi = shost_priv(shost);
 698        vscsi->vdev = vdev;
 699        vdev->priv = shost;
 700
 701        err = virtscsi_init(vdev, vscsi, num_targets);
 702        if (err)
 703                goto virtscsi_init_failed;
 704
 705        cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
 706        shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
 707        shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
 708
 709        /* LUNs > 256 are reported with format 1, so they go in the range
 710         * 16640-32767.
 711         */
 712        shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
 713        shost->max_id = num_targets;
 714        shost->max_channel = 0;
 715        shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
 716        err = scsi_add_host(shost, &vdev->dev);
 717        if (err)
 718                goto scsi_add_host_failed;
 719        /*
 720         * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
 721         * after VIRTIO_CONFIG_S_DRIVER_OK has been set..
 722         */
 723        return 0;
 724
 725scsi_add_host_failed:
 726        vdev->config->del_vqs(vdev);
 727virtscsi_init_failed:
 728        scsi_host_put(shost);
 729        return err;
 730}
 731
 732static void __devexit virtscsi_remove(struct virtio_device *vdev)
 733{
 734        struct Scsi_Host *shost = virtio_scsi_host(vdev);
 735        struct virtio_scsi *vscsi = shost_priv(shost);
 736
 737        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
 738                virtscsi_cancel_event_work(vscsi);
 739
 740        scsi_remove_host(shost);
 741
 742        virtscsi_remove_vqs(vdev);
 743        scsi_host_put(shost);
 744}
 745
 746#ifdef CONFIG_PM
 747static int virtscsi_freeze(struct virtio_device *vdev)
 748{
 749        virtscsi_remove_vqs(vdev);
 750        return 0;
 751}
 752
 753static int virtscsi_restore(struct virtio_device *vdev)
 754{
 755        struct Scsi_Host *sh = virtio_scsi_host(vdev);
 756        struct virtio_scsi *vscsi = shost_priv(sh);
 757
 758        return virtscsi_init(vdev, vscsi, sh->max_id);
 759}
 760#endif
 761
 762static struct virtio_device_id id_table[] = {
 763        { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
 764        { 0 },
 765};
 766
 767static unsigned int features[] = {
 768        VIRTIO_SCSI_F_HOTPLUG,
 769        VIRTIO_SCSI_F_CHANGE,
 770};
 771
 772static struct virtio_driver virtio_scsi_driver = {
 773        .feature_table = features,
 774        .feature_table_size = ARRAY_SIZE(features),
 775        .driver.name = KBUILD_MODNAME,
 776        .driver.owner = THIS_MODULE,
 777        .id_table = id_table,
 778        .probe = virtscsi_probe,
 779        .scan = virtscsi_scan,
 780#ifdef CONFIG_PM
 781        .freeze = virtscsi_freeze,
 782        .restore = virtscsi_restore,
 783#endif
 784        .remove = __devexit_p(virtscsi_remove),
 785};
 786
 787static int __init init(void)
 788{
 789        int ret = -ENOMEM;
 790
 791        virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
 792        if (!virtscsi_cmd_cache) {
 793                printk(KERN_ERR "kmem_cache_create() for "
 794                                "virtscsi_cmd_cache failed\n");
 795                goto error;
 796        }
 797
 798
 799        virtscsi_cmd_pool =
 800                mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
 801                                         virtscsi_cmd_cache);
 802        if (!virtscsi_cmd_pool) {
 803                printk(KERN_ERR "mempool_create() for"
 804                                "virtscsi_cmd_pool failed\n");
 805                goto error;
 806        }
 807        ret = register_virtio_driver(&virtio_scsi_driver);
 808        if (ret < 0)
 809                goto error;
 810
 811        return 0;
 812
 813error:
 814        if (virtscsi_cmd_pool) {
 815                mempool_destroy(virtscsi_cmd_pool);
 816                virtscsi_cmd_pool = NULL;
 817        }
 818        if (virtscsi_cmd_cache) {
 819                kmem_cache_destroy(virtscsi_cmd_cache);
 820                virtscsi_cmd_cache = NULL;
 821        }
 822        return ret;
 823}
 824
 825static void __exit fini(void)
 826{
 827        unregister_virtio_driver(&virtio_scsi_driver);
 828        mempool_destroy(virtscsi_cmd_pool);
 829        kmem_cache_destroy(virtscsi_cmd_cache);
 830}
 831module_init(init);
 832module_exit(fini);
 833
 834MODULE_DEVICE_TABLE(virtio, id_table);
 835MODULE_DESCRIPTION("Virtio SCSI HBA driver");
 836MODULE_LICENSE("GPL");
 837
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.