linux/drivers/vhost/scsi.c
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2012 RisingTide Systems LLC.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define TCM_VHOST_VERSION  "v0.1"
#define TCM_VHOST_NAMELEN 256
#define TCM_VHOST_MAX_CDB_SIZE 32

struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
        /* Refcount for the inflight reqs */
        struct kref kref;
};

struct tcm_vhost_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
        u32 tvc_exp_data_len;
        /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
        u64 tvc_tag;
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        /* Pointer to response */
        struct virtio_scsi_cmd_resp __user *tvc_resp;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
        struct tcm_vhost_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
        /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
        struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
        unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
        struct llist_node tvc_completion_list;
        /* Used to track inflight cmd */
        struct vhost_scsi_inflight *inflight;
};

struct tcm_vhost_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
};

struct tcm_vhost_nacl {
        /* Binary World Wide unique Port Name for Vhost Initiator port */
        u64 iport_wwpn;
        /* ASCII formatted WWPN for SAS Initiator port */
        char iport_name[TCM_VHOST_NAMELEN];
        /* Returned by tcm_vhost_make_nodeacl() */
        struct se_node_acl se_node_acl;
};

struct tcm_vhost_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track number of TPG Port/Lun links with regard to explicit I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
        /* list for tcm_vhost_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
        struct tcm_vhost_nexus *tpg_nexus;
        /* Pointer back to tcm_vhost_tport */
        struct tcm_vhost_tport *tport;
        /* Returned by tcm_vhost_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
};

struct tcm_vhost_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
        char tport_name[TCM_VHOST_NAMELEN];
        /* Returned by tcm_vhost_make_tport() */
        struct se_wwn tport_wwn;
};

struct tcm_vhost_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
        struct llist_node list;
};

enum {
        VHOST_SCSI_VQ_CTL = 0,
        VHOST_SCSI_VQ_EVT = 1,
        VHOST_SCSI_VQ_IO = 2,
};

enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

#define VHOST_SCSI_MAX_TARGET   256
#define VHOST_SCSI_MAX_VQ       128
#define VHOST_SCSI_MAX_EVENT    128

struct vhost_scsi_virtqueue {
        struct vhost_virtqueue vq;
        /*
         * Reference counting for inflight reqs, used for the flush operation.
         * At any time one slot tracks newly submitted commands, while we wait
         * for the other slot's refcount to reach 0.
         */
        struct vhost_scsi_inflight inflights[2];
        /*
         * Indicate current inflight in use, protected by vq->mutex.
         * Writers must also take dev mutex and flush under it.
         */
        int inflight_idx;
};

struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
        struct tcm_vhost_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

        struct vhost_dev dev;
        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

        struct vhost_work vs_completion_work; /* cmd completion work item */
        struct llist_head vs_completion_list; /* cmd completion queue */

        struct vhost_work vs_event_work; /* evt injection work item */
        struct llist_head vs_event_list; /* evt injection queue */

        bool vs_events_missed; /* any missed events, protected by vq->mutex */
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex to protect tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

static int iov_num_pages(struct iovec *iov)
{
        return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
               ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}
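
/*
 * Example (illustrative): with 4 KiB pages, an iovec with
 * iov_base = (void *)0x1000ff0 and iov_len = 32 straddles a page
 * boundary: PAGE_ALIGN(0x1001010) = 0x1002000 and
 * 0x1000ff0 & PAGE_MASK = 0x1000000, so iov_num_pages() returns
 * (0x1002000 - 0x1000000) >> PAGE_SHIFT = 2.
 */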

static void tcm_vhost_done_inflight(struct kref *kref)
{
        struct vhost_scsi_inflight *inflight;

        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
        complete(&inflight->comp);
}

static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* Store the old inflight */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* Set up the new inflight */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
                kref_init(&new_inflight->kref);
                init_completion(&new_inflight->comp);

                mutex_unlock(&vq->mutex);
        }
}
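
/*
 * A sketch of the double-buffering above: each virtqueue owns two
 * vhost_scsi_inflight slots, and inflight_idx selects the slot that new
 * commands take a kref on. tcm_vhost_init_inflight() hands the active
 * slot back to the caller (the flusher) and flips inflight_idx, so
 * commands submitted from this point on are counted in the other slot,
 * letting vhost_scsi_flush() wait for the old slot to drain without
 * blocking new I/O.
 */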

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;

        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
        inflight = &svq->inflights[svq->inflight_idx];
        kref_get(&inflight->kref);

        return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
        kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
        return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
        return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
        return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_FCP:
                return fc_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_fabric_proto_ident(se_tpg);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32
tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
                              struct se_node_acl *se_nacl,
                              struct t10_pr_registration *pr_reg,
                              int *format_code,
                              unsigned char *buf)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                        format_code, buf);
}

static u32
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
                                  struct se_node_acl *se_nacl,
                                  struct t10_pr_registration *pr_reg,
                                  int *format_code)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                        format_code);
}

static char *
tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
                                    const char *buf,
                                    u32 *out_tid_len,
                                    char **port_nexus_ptr)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_FCP:
                return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                        port_nexus_ptr);
}

static struct se_node_acl *
tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_nacl *nacl;

        nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
        if (!nacl) {
                pr_err("Unable to allocate struct tcm_vhost_nacl\n");
                return NULL;
        }

        return &nacl->se_node_acl;
}

static void
tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
                             struct se_node_acl *se_nacl)
{
        struct tcm_vhost_nacl *nacl = container_of(se_nacl,
                        struct tcm_vhost_nacl, se_node_acl);
        kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);

        if (tv_cmd->tvc_sgl_count) {
                u32 i;
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));

                kfree(tv_cmd->tvc_sgl);
        }

        tcm_vhost_put_inflight(tv_cmd->inflight);
        kfree(tv_cmd);
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
        return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
        return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
        return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
        return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
        return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
{
        struct vhost_scsi *vs = cmd->tvc_vhost;

        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
        return;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        vs->vs_events_nr--;
        kfree(evt);
}

static struct tcm_vhost_evt *
tcm_vhost_allocate_evt(struct vhost_scsi *vs,
                       u32 event, u32 reason)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;

        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
                return NULL;
        }

        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
                vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }

        evt->event.event = event;
        evt->event.reason = reason;
        vs->vs_events_nr++;

        return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
{
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

        /* TODO locking against target/backend threads? */
        transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
        return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

static void
tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
        struct virtio_scsi_event __user *eventp;
        unsigned out, in;
        int head, ret;

        if (!vq->private_data) {
                vs->vs_events_missed = true;
                return;
        }

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
                                vq->iov[out].iov_len);
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
                vs->vs_events_missed = false;
        }

        eventp = vq->iov[out].iov_base;
        ret = __copy_to_user(eventp, event, sizeof(*event));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;
        struct llist_node *llnode;

        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
                evt = llist_entry(llnode, struct tcm_vhost_evt, list);
                llnode = llist_next(llnode);
                tcm_vhost_do_evt_work(vs, evt);
                tcm_vhost_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
        struct tcm_vhost_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        int ret, vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
                cmd = llist_entry(llnode, struct tcm_vhost_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &cmd->tvc_se_cmd;

                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
                        cmd, se_cmd->residual_count, se_cmd->scsi_status);

                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = se_cmd->residual_count;
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = se_cmd->scsi_sense_length;
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       v_rsp.sense_len);
                ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
                if (likely(ret == 0)) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");

                vhost_scsi_free_cmd(cmd);
        }

        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
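
/*
 * Note on the completion path above: used-ring updates are batched per
 * virtqueue via the 'signal' bitmap, so each vq that completed at least
 * one command gets exactly one vhost_signal() (one guest notification)
 * per work item rather than one per command.
 */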

static struct tcm_vhost_cmd *
vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
                        struct tcm_vhost_tpg *tpg,
                        struct virtio_scsi_cmd_req *v_req,
                        u32 exp_data_len,
                        int data_direction)
{
        struct tcm_vhost_cmd *cmd;
        struct tcm_vhost_nexus *tv_nexus;

        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct tcm_vhost_nexus\n");
                return ERR_PTR(-EIO);
        }

        cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
        if (!cmd) {
                pr_err("Unable to allocate struct tcm_vhost_cmd\n");
                return ERR_PTR(-ENOMEM);
        }
        cmd->tvc_tag = v_req->tag;
        cmd->tvc_task_attr = v_req->task_attr;
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
        cmd->inflight = tcm_vhost_get_inflight(vq);

        return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct scatterlist *sgl,
                      unsigned int sgl_count,
                      struct iovec *iov,
                      int write)
{
        unsigned int npages = 0, pages_nr, offset, nbytes;
        struct scatterlist *sg = sgl;
        void __user *ptr = iov->iov_base;
        size_t len = iov->iov_len;
        struct page **pages;
        int ret, i;

        pages_nr = iov_num_pages(iov);
        if (pages_nr > sgl_count)
                return -ENOBUFS;

        pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* No pages were pinned */
        if (ret < 0)
                goto out;
        /* Fewer pages pinned than wanted */
        if (ret != pages_nr) {
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -EFAULT;
                goto out;
        }

        while (len > 0) {
                offset = (uintptr_t)ptr & ~PAGE_MASK;
                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
                sg_set_page(sg, pages[npages], nbytes, offset);
                ptr += nbytes;
                len -= nbytes;
                sg++;
                npages++;
        }

out:
        kfree(pages);
        return ret;
}
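
/*
 * Example (illustrative): a 6000-byte iovec starting at page offset
 * 0x800 pins two pages and produces two scatterlist entries:
 *   sg[0] = { page 0, offset 0x800, length 0x800 }
 *   sg[1] = { page 1, offset 0x000, length 0xf70 }
 * since 0x800 + 0xf70 = 0x1770 = 6000 bytes, matching iov_len.
 */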

static int
vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
                          struct iovec *iov,
                          unsigned int niov,
                          int write)
{
        int ret;
        unsigned int i;
        u32 sgl_count;
        struct scatterlist *sg;

        /*
         * Find out how long sglist needs to be
         */
        sgl_count = 0;
        for (i = 0; i < niov; i++)
                sgl_count += iov_num_pages(&iov[i]);

        /* TODO overflow checking */

        sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
        if (!sg)
                return -ENOMEM;
        pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
               sg, sgl_count, !sg);
        sg_init_table(sg, sgl_count);

        cmd->tvc_sgl = sg;
        cmd->tvc_sgl_count = sgl_count;

        pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
                if (ret < 0) {
                        for (i = 0; i < cmd->tvc_sgl_count; i++)
                                put_page(sg_page(&cmd->tvc_sgl[i]));
                        kfree(cmd->tvc_sgl);
                        cmd->tvc_sgl = NULL;
                        cmd->tvc_sgl_count = 0;
                        return ret;
                }

                sg += ret;
                sgl_count -= ret;
        }
        return 0;
}

static void tcm_vhost_submission_work(struct work_struct *work)
{
        struct tcm_vhost_cmd *cmd =
                container_of(work, struct tcm_vhost_cmd, work);
        struct tcm_vhost_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
        int rc, sg_no_bidi = 0;

        if (cmd->tvc_sgl_count) {
                sg_ptr = cmd->tvc_sgl;
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
                if (se_cmd->se_cmd_flags & SCF_BIDI) {
                        sg_bidi_ptr = NULL;
                        sg_no_bidi = 0;
                }
#endif
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = cmd->tvc_nexus;

        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        cmd->tvc_task_attr, cmd->tvc_data_direction,
                        TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
                        sg_bidi_ptr, sg_no_bidi);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0);
        }
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
                           struct vhost_virtqueue *vq,
                           int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
        struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
        struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_cmd *cmd;
        u32 exp_data_len, data_first, data_num, data_direction;
        unsigned out, in, i;
        int head, ret;
        u8 target;

        mutex_lock(&vq->mutex);
        /*
         * We can handle the vq only after the endpoint is setup by calling the
         * VHOST_SCSI_SET_ENDPOINT ioctl.
         */
        vs_tpg = vq->private_data;
        if (!vs_tpg)
                goto out;

        vhost_disable_notify(&vs->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
                                        ARRAY_SIZE(vq->iov), &out, &in,
                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                                        head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                                vhost_disable_notify(&vs->dev, vq);
                                continue;
                        }
                        break;
                }

/* FIXME: BIDI operation */
                if (out == 1 && in == 1) {
                        data_direction = DMA_NONE;
                        data_first = 0;
                        data_num = 0;
                } else if (out == 1 && in > 1) {
                        data_direction = DMA_FROM_DEVICE;
                        data_first = out + 1;
                        data_num = in - 1;
                } else if (out > 1 && in == 1) {
                        data_direction = DMA_TO_DEVICE;
                        data_first = 1;
                        data_num = out - 1;
                } else {
                        vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
                                        out, in);
                        break;
                }
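
                /*
                 * A note on the layout checks above: virtio-scsi puts the
                 * virtio_scsi_cmd_req header in the first readable (out)
                 * descriptor and the virtio_scsi_cmd_resp in the first
                 * writable (in) descriptor. Any extra out descriptors carry
                 * data-out (DMA_TO_DEVICE) and any extra in descriptors
                 * carry data-in (DMA_FROM_DEVICE); requests with both are
                 * not yet handled (see the BIDI FIXME).
                 */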

                /*
                 * Check for a sane resp buffer so we can report errors to
                 * the guest.
                 */
                if (unlikely(vq->iov[out].iov_len !=
                                        sizeof(struct virtio_scsi_cmd_resp))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
                                " bytes\n", vq->iov[out].iov_len);
                        break;
                }

                if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
                                " bytes\n", vq->iov[0].iov_len);
                        break;
                }
                pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
                        " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
                ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
                                sizeof(v_req));
                if (unlikely(ret)) {
                        vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
                        break;
                }

                /* Extract the tpgt */
                target = v_req.lun[1];
                tpg = ACCESS_ONCE(vs_tpg[target]);

                /* Target does not exist, fail the request */
                if (unlikely(!tpg)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                exp_data_len = 0;
                for (i = 0; i < data_num; i++)
                        exp_data_len += vq->iov[data_first + i].iov_len;

                cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req,
                                        exp_data_len, data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
                                        PTR_ERR(cmd));
                        goto err_cmd;
                }
                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
                        ": %d\n", cmd, exp_data_len, data_direction);

                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
                cmd->tvc_resp = vq->iov[out].iov_base;

                /*
                 * Copy the received CDB descriptor into cmd->tvc_cdb
                 * that will be used by tcm_vhost_new_cmd_map() and down into
                 * target_setup_cmd_from_cdb()
                 */
                memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for tcm_vhost
                 */
                /* TODO what if cdb was too small for varlen cdb header? */
                if (unlikely(scsi_command_size(cmd->tvc_cdb) >
                                        TCM_VHOST_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(cmd->tvc_cdb),
                                TCM_VHOST_MAX_CDB_SIZE);
                        goto err_free;
                }
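                /*
                 * Unpack the virtio-scsi LUN: lun[0] is 0x01, lun[1] selects
                 * the target, and lun[2..3] carry the LUN with 0x40 set in
                 * the high byte (SAM flat addressing). For example,
                 * lun[] = { 0x01, 0x05, 0x40, 0x02 } addresses target 5,
                 * LUN 2, and the mask below recovers tvc_lun = 2.
                 */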
                cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                        cmd->tvc_cdb[0], cmd->tvc_lun);

                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_map_iov_to_sgl(cmd,
                                        &vq->iov[data_first], data_num,
                                        data_direction == DMA_TO_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                goto err_free;
                        }
                }

                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
                 */
                cmd->tvc_vq_desc = head;
                /*
                 * Dispatch tv_cmd descriptor for cmwq execution in process
                 * context provided by tcm_vhost_workqueue.  This also ensures
                 * tv_cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
                INIT_WORK(&cmd->work, tcm_vhost_submission_work);
                queue_work(tcm_vhost_workqueue, &cmd->work);
        }

        mutex_unlock(&vq->mutex);
        return;

err_free:
        vhost_scsi_free_cmd(cmd);
err_cmd:
        vhost_scsi_send_bad_target(vs, vq, head, out);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void
tcm_vhost_send_evt(struct vhost_scsi *vs,
                   struct tcm_vhost_tpg *tpg,
                   struct se_lun *lun,
                   u32 event,
                   u32 reason)
{
        struct tcm_vhost_evt *evt;

        evt = tcm_vhost_allocate_evt(vs, event, reason);
        if (!evt)
                return;

        if (tpg && lun) {
                /* TODO: share lun setup code with virtio-scsi.ko */
                /*
                 * Note: evt->event is zeroed when we allocate it and
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
                evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
        }

        llist_add(&evt->list, &vs->vs_event_list);
        vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        mutex_lock(&vq->mutex);
        if (!vq->private_data)
                goto out;

        if (vs->vs_events_missed)
                tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
        vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
        int i;

        /* Init new inflight and remember the old inflight */
        tcm_vhost_init_inflight(vs, old_inflight);

        /*
         * The inflight->kref was initialized to 1. We decrement it here to
         * indicate the start of the flush operation so that it will reach 0
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                vhost_scsi_flush_vq(vs, i);
        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
        vhost_work_flush(&vs->dev, &vs->vs_event_work);

        /* Wait for all reqs issued before the flush to be finished */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                wait_for_completion(&old_inflight[i]->comp);
}
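
/*
 * Flush sequencing, in short: each slot's kref starts at 1 from
 * kref_init(), every in-flight command holds one extra reference, and
 * the kref_put() above drops the initial reference. The completion
 * therefore fires exactly when the last command issued before the flip
 * (see tcm_vhost_init_inflight()) calls tcm_vhost_put_inflight().
 */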

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 *  The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
{
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;

        mutex_lock(&tcm_vhost_mutex);
        mutex_lock(&vs->dev.mutex);

        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
1184                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1185                        ret = -EFAULT;
1186                        goto out;
1187                }
1188        }
1189
1190        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1191        vs_tpg = kzalloc(len, GFP_KERNEL);
1192        if (!vs_tpg) {
1193                ret = -ENOMEM;
1194                goto out;
1195        }
1196        if (vs->vs_tpg)
1197                memcpy(vs_tpg, vs->vs_tpg, len);
1198
1199        list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
1200                mutex_lock(&tpg->tv_tpg_mutex);
1201                if (!tpg->tpg_nexus) {
1202                        mutex_unlock(&tpg->tv_tpg_mutex);
1203                        continue;
1204                }
1205                if (tpg->tv_tpg_vhost_count != 0) {
1206                        mutex_unlock(&tpg->tv_tpg_mutex);
1207                        continue;
1208                }
1209                tv_tport = tpg->tport;
1210
1211                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1212                        if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1213                                kfree(vs_tpg);
1214                                mutex_unlock(&tpg->tv_tpg_mutex);
1215                                ret = -EEXIST;
1216                                goto out;
1217                        }
1218                        tpg->tv_tpg_vhost_count++;
1219                        tpg->vhost_scsi = vs;
1220                        vs_tpg[tpg->tport_tpgt] = tpg;
1221                        smp_mb__after_atomic_inc();
1222                        match = true;
1223                }
1224                mutex_unlock(&tpg->tv_tpg_mutex);
1225        }
1226
1227        if (match) {
1228                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1229                       sizeof(vs->vs_vhost_wwpn));
1230                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1231                        vq = &vs->vqs[i].vq;
1232                        mutex_lock(&vq->mutex);
1233                        vq->private_data = vs_tpg;
1234                        vhost_init_used(vq);
1235                        mutex_unlock(&vq->mutex);
1236                }
1237                ret = 0;
1238        } else {
1239                ret = -EEXIST;
1240        }
1241
1242        /*
1243         * Act as synchronize_rcu to make sure access to
1244         * old vs->vs_tpg is finished.
1245         */
1246        vhost_scsi_flush(vs);
1247        kfree(vs->vs_tpg);
1248        vs->vs_tpg = vs_tpg;
1249
1250out:
1251        mutex_unlock(&vs->dev.mutex);
1252        mutex_unlock(&tcm_vhost_mutex);
1253        return ret;
1254}
1255
1256static int
1257vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1258                          struct vhost_scsi_target *t)
1259{
1260        struct tcm_vhost_tport *tv_tport;
1261        struct tcm_vhost_tpg *tpg;
1262        struct vhost_virtqueue *vq;
1263        bool match = false;
1264        int index, ret, i;
1265        u8 target;
1266
1267        mutex_lock(&tcm_vhost_mutex);
1268        mutex_lock(&vs->dev.mutex);
1269        /* Verify that ring has been setup correctly. */
1270        for (index = 0; index < vs->dev.nvqs; ++index) {
1271                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1272                        ret = -EFAULT;
1273                        goto err_dev;
1274                }
1275        }
1276
1277        if (!vs->vs_tpg) {
1278                ret = 0;
1279                goto err_dev;
1280        }
1281
1282        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1283                target = i;
1284                tpg = vs->vs_tpg[target];
1285                if (!tpg)
1286                        continue;
1287
1288                mutex_lock(&tpg->tv_tpg_mutex);
1289                tv_tport = tpg->tport;
1290                if (!tv_tport) {
1291                        ret = -ENODEV;
1292                        goto err_tpg;
1293                }
1294
1295                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1296                        pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1297                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1298                                tv_tport->tport_name, tpg->tport_tpgt,
1299                                t->vhost_wwpn, t->vhost_tpgt);
1300                        ret = -EINVAL;
1301                        goto err_tpg;
1302                }
1303                tpg->tv_tpg_vhost_count--;
1304                tpg->vhost_scsi = NULL;
1305                vs->vs_tpg[target] = NULL;
1306                match = true;
1307                mutex_unlock(&tpg->tv_tpg_mutex);
1308        }
1309        if (match) {
1310                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1311                        vq = &vs->vqs[i].vq;
1312                        mutex_lock(&vq->mutex);
1313                        vq->private_data = NULL;
1314                        mutex_unlock(&vq->mutex);
1315                }
1316        }
1317        /*
1318         * Act as synchronize_rcu to make sure access to
1319         * old vs->vs_tpg is finished.
1320         */
1321        vhost_scsi_flush(vs);
1322        kfree(vs->vs_tpg);
1323        vs->vs_tpg = NULL;
1324        WARN_ON(vs->vs_events_nr);
1325        mutex_unlock(&vs->dev.mutex);
1326        mutex_unlock(&tcm_vhost_mutex);
1327        return 0;
1328
1329err_tpg:
1330        mutex_unlock(&tpg->tv_tpg_mutex);
1331err_dev:
1332        mutex_unlock(&vs->dev.mutex);
1333        mutex_unlock(&tcm_vhost_mutex);
1334        return ret;
1335}
1336
1337static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1338{
1339        if (features & ~VHOST_SCSI_FEATURES)
1340                return -EOPNOTSUPP;
1341
1342        mutex_lock(&vs->dev.mutex);
1343        if ((features & (1 << VHOST_F_LOG_ALL)) &&
1344            !vhost_log_access_ok(&vs->dev)) {
1345                mutex_unlock(&vs->dev.mutex);
1346                return -EFAULT;
1347        }
1348        vs->dev.acked_features = features;
1349        smp_wmb();
1350        vhost_scsi_flush(vs);
1351        mutex_unlock(&vs->dev.mutex);
1352        return 0;
1353}
1354
1355static int vhost_scsi_open(struct inode *inode, struct file *f)
1356{
1357        struct vhost_scsi *vs;
1358        struct vhost_virtqueue **vqs;
1359        int r, i;
1360
1361        vs = kzalloc(sizeof(*vs), GFP_KERNEL);
1362        if (!vs)
1363                return -ENOMEM;
1364
1365        vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1366        if (!vqs) {
1367                kfree(vs);
1368                return -ENOMEM;
1369        }
1370
1371        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1372        vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
1373
1374        vs->vs_events_nr = 0;
1375        vs->vs_events_missed = false;
1376
1377        vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1378        vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1379        vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1380        vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1381        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1382                vqs[i] = &vs->vqs[i].vq;
1383                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1384        }
1385        r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1386
1387        tcm_vhost_init_inflight(vs, NULL);
1388
1389        if (r < 0) {
1390                kfree(vqs);
1391                kfree(vs);
1392                return r;
1393        }
1394
1395        f->private_data = vs;
1396        return 0;
1397}
1398
1399static int vhost_scsi_release(struct inode *inode, struct file *f)
1400{
1401        struct vhost_scsi *vs = f->private_data;
1402        struct vhost_scsi_target t;
1403
1404        mutex_lock(&vs->dev.mutex);
1405        memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1406        mutex_unlock(&vs->dev.mutex);
1407        vhost_scsi_clear_endpoint(vs, &t);
1408        vhost_dev_stop(&vs->dev);
1409        vhost_dev_cleanup(&vs->dev, false);
1410        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1411        vhost_scsi_flush(vs);
1412        kfree(vs->dev.vqs);
1413        kfree(vs);
1414        return 0;
1415}
1416
1417static long
1418vhost_scsi_ioctl(struct file *f,
1419                 unsigned int ioctl,
1420                 unsigned long arg)
1421{
1422        struct vhost_scsi *vs = f->private_data;
1423        struct vhost_scsi_target backend;
1424        void __user *argp = (void __user *)arg;
1425        u64 __user *featurep = argp;
1426        u32 __user *eventsp = argp;
1427        u32 events_missed;
1428        u64 features;
1429        int r, abi_version = VHOST_SCSI_ABI_VERSION;
1430        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1431
1432        switch (ioctl) {
1433        case VHOST_SCSI_SET_ENDPOINT:
1434                if (copy_from_user(&backend, argp, sizeof backend))
1435                        return -EFAULT;
1436                if (backend.reserved != 0)
1437                        return -EOPNOTSUPP;
1438
1439                return vhost_scsi_set_endpoint(vs, &backend);
1440        case VHOST_SCSI_CLEAR_ENDPOINT:
1441                if (copy_from_user(&backend, argp, sizeof backend))
1442                        return -EFAULT;
1443                if (backend.reserved != 0)
1444                        return -EOPNOTSUPP;
1445
1446                return vhost_scsi_clear_endpoint(vs, &backend);
1447        case VHOST_SCSI_GET_ABI_VERSION:
1448                if (copy_to_user(argp, &abi_version, sizeof abi_version))
1449                        return -EFAULT;
1450                return 0;
1451        case VHOST_SCSI_SET_EVENTS_MISSED:
1452                if (get_user(events_missed, eventsp))
1453                        return -EFAULT;
1454                mutex_lock(&vq->mutex);
1455                vs->vs_events_missed = events_missed;
1456                mutex_unlock(&vq->mutex);
1457                return 0;
1458        case VHOST_SCSI_GET_EVENTS_MISSED:
1459                mutex_lock(&vq->mutex);
1460                events_missed = vs->vs_events_missed;
1461                mutex_unlock(&vq->mutex);
1462                if (put_user(events_missed, eventsp))
1463                        return -EFAULT;
1464                return 0;
1465        case VHOST_GET_FEATURES:
1466                features = VHOST_SCSI_FEATURES;
1467                if (copy_to_user(featurep, &features, sizeof features))
1468                        return -EFAULT;
1469                return 0;
1470        case VHOST_SET_FEATURES:
1471                if (copy_from_user(&features, featurep, sizeof features))
1472                        return -EFAULT;
1473                return vhost_scsi_set_features(vs, features);
1474        default:
1475                mutex_lock(&vs->dev.mutex);
1476                r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1477                /* TODO: flush backend after dev ioctl. */
1478                if (r == -ENOIOCTLCMD)
1479                        r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1480                mutex_unlock(&vs->dev.mutex);
1481                return r;
1482        }
1483}
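/*
 * Illustrative only: a minimal userspace sequence against the ioctls handled
 * above, assuming the misc device shows up as /dev/vhost-scsi, that
 * struct vhost_scsi_target and the VHOST_SCSI_* ioctl numbers come from
 * <linux/vhost.h>, and that the usual fcntl/ioctl headers are included.
 * A real user (e.g. QEMU) would additionally set the owner, negotiate
 * features and configure the vrings before starting I/O.
 *
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *	int abi = 0;
 *	struct vhost_scsi_target tgt = { 0 };
 *
 *	ioctl(vhost_fd, VHOST_SCSI_GET_ABI_VERSION, &abi);
 *	snprintf(tgt.vhost_wwpn, sizeof(tgt.vhost_wwpn),
 *		 "naa.600140554cf3a18e");	// hypothetical target WWPN
 *	tgt.vhost_tpgt = 1;			// hypothetical TPG tag
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &tgt);
 *	...
 *	ioctl(vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &tgt);
 *	close(vhost_fd);
 */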
1484
1485#ifdef CONFIG_COMPAT
1486static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1487                                unsigned long arg)
1488{
1489        return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1490}
1491#endif
1492
1493static const struct file_operations vhost_scsi_fops = {
1494        .owner          = THIS_MODULE,
1495        .release        = vhost_scsi_release,
1496        .unlocked_ioctl = vhost_scsi_ioctl,
1497#ifdef CONFIG_COMPAT
1498        .compat_ioctl   = vhost_scsi_compat_ioctl,
1499#endif
1500        .open           = vhost_scsi_open,
1501        .llseek         = noop_llseek,
1502};
1503
1504static struct miscdevice vhost_scsi_misc = {
1505        MISC_DYNAMIC_MINOR,
1506        "vhost-scsi",
1507        &vhost_scsi_fops,
1508};
1509
1510static int __init vhost_scsi_register(void)
1511{
1512        return misc_register(&vhost_scsi_misc);
1513}
1514
1515static int vhost_scsi_deregister(void)
1516{
1517        return misc_deregister(&vhost_scsi_misc);
1518}
1519
1520static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1521{
1522        switch (tport->tport_proto_id) {
1523        case SCSI_PROTOCOL_SAS:
1524                return "SAS";
1525        case SCSI_PROTOCOL_FCP:
1526                return "FCP";
1527        case SCSI_PROTOCOL_ISCSI:
1528                return "iSCSI";
1529        default:
1530                break;
1531        }
1532
1533        return "Unknown";
1534}
1535
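/*
 * Descriptive note (added): queue a virtio-scsi transport reset event for a
 * LUN being added to or removed from this TPG.  Nothing is sent unless the
 * guest negotiated VIRTIO_SCSI_F_HOTPLUG; the reason code tells the guest
 * whether to rescan (hotplug) or drop the LUN (hotunplug), and
 * tcm_vhost_send_evt() is called under the event virtqueue mutex.
 */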
1536static void
1537tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1538                  struct se_lun *lun, bool plug)
1539{
1541        struct vhost_scsi *vs = tpg->vhost_scsi;
1542        struct vhost_virtqueue *vq;
1543        u32 reason;
1544
1545        if (!vs)
1546                return;
1547
1548        mutex_lock(&vs->dev.mutex);
1549        if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
1550                mutex_unlock(&vs->dev.mutex);
1551                return;
1552        }
1553
1554        if (plug)
1555                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1556        else
1557                reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1558
1559        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1560        mutex_lock(&vq->mutex);
1561        tcm_vhost_send_evt(vs, tpg, lun,
1562                        VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1563        mutex_unlock(&vq->mutex);
1564        mutex_unlock(&vs->dev.mutex);
1565}
1566
1567static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1568{
1569        tcm_vhost_do_plug(tpg, lun, true);
1570}
1571
1572static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1573{
1574        tcm_vhost_do_plug(tpg, lun, false);
1575}
1576
1577static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1578                               struct se_lun *lun)
1579{
1580        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1581                                struct tcm_vhost_tpg, se_tpg);
1582
1583        mutex_lock(&tcm_vhost_mutex);
1584
1585        mutex_lock(&tpg->tv_tpg_mutex);
1586        tpg->tv_tpg_port_count++;
1587        mutex_unlock(&tpg->tv_tpg_mutex);
1588
1589        tcm_vhost_hotplug(tpg, lun);
1590
1591        mutex_unlock(&tcm_vhost_mutex);
1592
1593        return 0;
1594}
1595
1596static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1597                                  struct se_lun *lun)
1598{
1599        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1600                                struct tcm_vhost_tpg, se_tpg);
1601
1602        mutex_lock(&tcm_vhost_mutex);
1603
1604        mutex_lock(&tpg->tv_tpg_mutex);
1605        tpg->tv_tpg_port_count--;
1606        mutex_unlock(&tpg->tv_tpg_mutex);
1607
1608        tcm_vhost_hotunplug(tpg, lun);
1609
1610        mutex_unlock(&tcm_vhost_mutex);
1611}
1612
1613static struct se_node_acl *
1614tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1615                       struct config_group *group,
1616                       const char *name)
1617{
1618        struct se_node_acl *se_nacl, *se_nacl_new;
1619        struct tcm_vhost_nacl *nacl;
1620        u64 wwpn = 0;
1621        u32 nexus_depth;
1622
1623        /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1624                return ERR_PTR(-EINVAL); */
1625        se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1626        if (!se_nacl_new)
1627                return ERR_PTR(-ENOMEM);
1628
1629        nexus_depth = 1;
1630        /*
1631         * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1632         * when converting a NodeACL from demo mode -> explicit
1633         */
1634        se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1635                                name, nexus_depth);
1636        if (IS_ERR(se_nacl)) {
1637                tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1638                return se_nacl;
1639        }
1640        /*
1641         * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1642         */
1643        nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1644        nacl->iport_wwpn = wwpn;
1645
1646        return se_nacl;
1647}
1648
1649static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1650{
1651        struct tcm_vhost_nacl *nacl = container_of(se_acl,
1652                                struct tcm_vhost_nacl, se_node_acl);
1653        core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1654        kfree(nacl);
1655}
1656
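/*
 * Descriptive note (added): create the single I_T nexus backing this TPG.
 * Allocate a tcm_vhost_nexus, set up a TCM se_session, look up (or create,
 * since demo mode is enabled) the initiator node ACL named by 'name', and
 * register the session.  Only one nexus may exist per TPG, so -EEXIST is
 * returned if tpg->tpg_nexus is already set.
 */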
1657static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1658                                const char *name)
1659{
1660        struct se_portal_group *se_tpg;
1661        struct tcm_vhost_nexus *tv_nexus;
1662
1663        mutex_lock(&tpg->tv_tpg_mutex);
1664        if (tpg->tpg_nexus) {
1665                mutex_unlock(&tpg->tv_tpg_mutex);
1666                pr_debug("tpg->tpg_nexus already exists\n");
1667                return -EEXIST;
1668        }
1669        se_tpg = &tpg->se_tpg;
1670
1671        tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1672        if (!tv_nexus) {
1673                mutex_unlock(&tpg->tv_tpg_mutex);
1674                pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1675                return -ENOMEM;
1676        }
1677        /*
1678         *  Initialize the struct se_session pointer
1679         */
1680        tv_nexus->tvn_se_sess = transport_init_session();
1681        if (IS_ERR(tv_nexus->tvn_se_sess)) {
1682                mutex_unlock(&tpg->tv_tpg_mutex);
1683                kfree(tv_nexus);
1684                return -ENOMEM;
1685        }
1686        /*
1687         * Since we are running in 'demo mode' this call will generate a
1688         * struct se_node_acl for the tcm_vhost struct se_portal_group with
1689         * the SCSI Initiator port name of the passed configfs group 'name'.
1690         */
1691        tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1692                                se_tpg, (unsigned char *)name);
1693        if (!tv_nexus->tvn_se_sess->se_node_acl) {
1694                mutex_unlock(&tpg->tv_tpg_mutex);
1695                pr_debug("core_tpg_check_initiator_node_acl() failed"
1696                                " for %s\n", name);
1697                transport_free_session(tv_nexus->tvn_se_sess);
1698                kfree(tv_nexus);
1699                return -ENOMEM;
1700        }
1701        /*
1702         * Now register the TCM vhost virtual I_T Nexus as active with the
1703         * call to __transport_register_session()
1704         */
1705        __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1706                        tv_nexus->tvn_se_sess, tv_nexus);
1707        tpg->tpg_nexus = tv_nexus;
1708
1709        mutex_unlock(&tpg->tv_tpg_mutex);
1710        return 0;
1711}
1712
1713static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1714{
1715        struct se_session *se_sess;
1716        struct tcm_vhost_nexus *tv_nexus;
1717
1718        mutex_lock(&tpg->tv_tpg_mutex);
1719        tv_nexus = tpg->tpg_nexus;
1720        if (!tv_nexus) {
1721                mutex_unlock(&tpg->tv_tpg_mutex);
1722                return -ENODEV;
1723        }
1724
1725        se_sess = tv_nexus->tvn_se_sess;
1726        if (!se_sess) {
1727                mutex_unlock(&tpg->tv_tpg_mutex);
1728                return -ENODEV;
1729        }
1730
1731        if (tpg->tv_tpg_port_count != 0) {
1732                mutex_unlock(&tpg->tv_tpg_mutex);
1733                pr_err("Unable to remove TCM_vhost I_T Nexus with"
1734                        " active TPG port count: %d\n",
1735                        tpg->tv_tpg_port_count);
1736                return -EBUSY;
1737        }
1738
1739        if (tpg->tv_tpg_vhost_count != 0) {
1740                mutex_unlock(&tpg->tv_tpg_mutex);
1741                pr_err("Unable to remove TCM_vhost I_T Nexus with"
1742                        " active TPG vhost count: %d\n",
1743                        tpg->tv_tpg_vhost_count);
1744                return -EBUSY;
1745        }
1746
1747        pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1748                " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1749                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1750        /*
1751         * Release the SCSI I_T Nexus to the emulated vhost Target Port
1752         */
1753        transport_deregister_session(tv_nexus->tvn_se_sess);
1754        tpg->tpg_nexus = NULL;
1755        mutex_unlock(&tpg->tv_tpg_mutex);
1756
1757        kfree(tv_nexus);
1758        return 0;
1759}
1760
1761static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1762                                        char *page)
1763{
1764        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1765                                struct tcm_vhost_tpg, se_tpg);
1766        struct tcm_vhost_nexus *tv_nexus;
1767        ssize_t ret;
1768
1769        mutex_lock(&tpg->tv_tpg_mutex);
1770        tv_nexus = tpg->tpg_nexus;
1771        if (!tv_nexus) {
1772                mutex_unlock(&tpg->tv_tpg_mutex);
1773                return -ENODEV;
1774        }
1775        ret = snprintf(page, PAGE_SIZE, "%s\n",
1776                        tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1777        mutex_unlock(&tpg->tv_tpg_mutex);
1778
1779        return ret;
1780}
1781
1782static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1783                                         const char *page,
1784                                         size_t count)
1785{
1786        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1787                                struct tcm_vhost_tpg, se_tpg);
1788        struct tcm_vhost_tport *tport_wwn = tpg->tport;
1789        unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1790        int ret;
1791        /*
1792         * Shut down the active I_T nexus if 'NULL' is passed.
1793         */
1794        if (!strncmp(page, "NULL", 4)) {
1795                ret = tcm_vhost_drop_nexus(tpg);
1796                return (!ret) ? count : ret;
1797        }
1798        /*
1799         * Otherwise make sure the passed virtual Initiator port WWN matches
1800         * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1801         * tcm_vhost_make_nexus().
1802         */
1803        if (strlen(page) >= TCM_VHOST_NAMELEN) {
1804                pr_err("Emulated NAA SAS Address: %s, exceeds"
1805                                " max: %d\n", page, TCM_VHOST_NAMELEN);
1806                return -EINVAL;
1807        }
1808        snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1809
1810        ptr = strstr(i_port, "naa.");
1811        if (ptr) {
1812                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1813                        pr_err("Passed SAS Initiator Port %s does not"
1814                                " match target port protoid: %s\n", i_port,
1815                                tcm_vhost_dump_proto_id(tport_wwn));
1816                        return -EINVAL;
1817                }
1818                port_ptr = &i_port[0];
1819                goto check_newline;
1820        }
1821        ptr = strstr(i_port, "fc.");
1822        if (ptr) {
1823                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1824                        pr_err("Passed FCP Initiator Port %s does not"
1825                                " match target port protoid: %s\n", i_port,
1826                                tcm_vhost_dump_proto_id(tport_wwn));
1827                        return -EINVAL;
1828                }
1829                port_ptr = &i_port[3]; /* Skip over "fc." */
1830                goto check_newline;
1831        }
1832        ptr = strstr(i_port, "iqn.");
1833        if (ptr) {
1834                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1835                        pr_err("Passed iSCSI Initiator Port %s does not"
1836                                " match target port protoid: %s\n", i_port,
1837                                tcm_vhost_dump_proto_id(tport_wwn));
1838                        return -EINVAL;
1839                }
1840                port_ptr = &i_port[0];
1841                goto check_newline;
1842        }
1843        pr_err("Unable to locate prefix for emulated Initiator Port:"
1844                        " %s\n", i_port);
1845        return -EINVAL;
1846        /*
1847         * Clear any trailing newline for the NAA WWN
1848         */
1849check_newline:
1850        if (i_port[strlen(i_port)-1] == '\n')
1851                i_port[strlen(i_port)-1] = '\0';
1852
1853        ret = tcm_vhost_make_nexus(tpg, port_ptr);
1854        if (ret < 0)
1855                return ret;
1856
1857        return count;
1858}
1859
1860TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
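/*
 * Usage sketch (added, hypothetical WWPN and the usual configfs mount point
 * assumed): the 'nexus' attribute defined above is driven from userspace.
 * Writing an initiator port name whose prefix (naa., fc. or iqn.) matches
 * the tport protocol creates the I_T nexus; writing "NULL" drops it again:
 *
 *	echo -n naa.600140554cf3a18e \
 *		> /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *	echo -n NULL \
 *		> /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 */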
1861
1862static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1863        &tcm_vhost_tpg_nexus.attr,
1864        NULL,
1865};
1866
1867static struct se_portal_group *
1868tcm_vhost_make_tpg(struct se_wwn *wwn,
1869                   struct config_group *group,
1870                   const char *name)
1871{
1872        struct tcm_vhost_tport *tport = container_of(wwn,
1873                        struct tcm_vhost_tport, tport_wwn);
1874
1875        struct tcm_vhost_tpg *tpg;
1876        unsigned long tpgt;
1877        int ret;
1878
1879        if (strstr(name, "tpgt_") != name)
1880                return ERR_PTR(-EINVAL);
1881        if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1882                return ERR_PTR(-EINVAL);
1883
1884        tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1885        if (!tpg) {
1886                pr_err("Unable to allocate struct tcm_vhost_tpg");
1887                return ERR_PTR(-ENOMEM);
1888        }
1889        mutex_init(&tpg->tv_tpg_mutex);
1890        INIT_LIST_HEAD(&tpg->tv_tpg_list);
1891        tpg->tport = tport;
1892        tpg->tport_tpgt = tpgt;
1893
1894        ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1895                                &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1896        if (ret < 0) {
1897                kfree(tpg);
1898                return NULL;
1899        }
1900        mutex_lock(&tcm_vhost_mutex);
1901        list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1902        mutex_unlock(&tcm_vhost_mutex);
1903
1904        return &tpg->se_tpg;
1905}
1906
1907static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1908{
1909        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1910                                struct tcm_vhost_tpg, se_tpg);
1911
1912        mutex_lock(&tcm_vhost_mutex);
1913        list_del(&tpg->tv_tpg_list);
1914        mutex_unlock(&tcm_vhost_mutex);
1915        /*
1916         * Release the virtual I_T Nexus for this vhost TPG
1917         */
1918        tcm_vhost_drop_nexus(tpg);
1919        /*
1920         * Deregister the se_tpg from TCM.
1921         */
1922        core_tpg_deregister(se_tpg);
1923        kfree(tpg);
1924}
1925
1926static struct se_wwn *
1927tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1928                     struct config_group *group,
1929                     const char *name)
1930{
1931        struct tcm_vhost_tport *tport;
1932        char *ptr;
1933        u64 wwpn = 0;
1934        int off = 0;
1935
1936        /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1937                return ERR_PTR(-EINVAL); */
1938
1939        tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1940        if (!tport) {
1941                pr_err("Unable to allocate struct tcm_vhost_tport");
1942                return ERR_PTR(-ENOMEM);
1943        }
1944        tport->tport_wwpn = wwpn;
1945        /*
1946         * Determine the emulated Protocol Identifier and Target Port Name
1947         * based on the incoming configfs directory name.
1948         */
1949        ptr = strstr(name, "naa.");
1950        if (ptr) {
1951                tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1952                goto check_len;
1953        }
1954        ptr = strstr(name, "fc.");
1955        if (ptr) {
1956                tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1957                off = 3; /* Skip over "fc." */
1958                goto check_len;
1959        }
1960        ptr = strstr(name, "iqn.");
1961        if (ptr) {
1962                tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1963                goto check_len;
1964        }
1965
1966        pr_err("Unable to locate prefix for emulated Target Port:"
1967                        " %s\n", name);
1968        kfree(tport);
1969        return ERR_PTR(-EINVAL);
1970
1971check_len:
1972        if (strlen(name) >= TCM_VHOST_NAMELEN) {
1973                pr_err("Emulated %s Address: %s, exceeds"
1974                        " max: %d\n", tcm_vhost_dump_proto_id(tport), name,
1975                        TCM_VHOST_NAMELEN);
1976                kfree(tport);
1977                return ERR_PTR(-EINVAL);
1978        }
1979        snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
1980
1981        pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1982                " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
1983
1984        return &tport->tport_wwn;
1985}
1986
1987static void tcm_vhost_drop_tport(struct se_wwn *wwn)
1988{
1989        struct tcm_vhost_tport *tport = container_of(wwn,
1990                                struct tcm_vhost_tport, tport_wwn);
1991
1992        pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
1993                " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
1994                tport->tport_name);
1995
1996        kfree(tport);
1997}
1998
1999static ssize_t
2000tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
2001                                char *page)
2002{
2003        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2004                " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2005                utsname()->machine);
2006}
2007
2008TF_WWN_ATTR_RO(tcm_vhost, version);
2009
2010static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2011        &tcm_vhost_wwn_version.attr,
2012        NULL,
2013};
2014
2015static struct target_core_fabric_ops tcm_vhost_ops = {
2016        .get_fabric_name                = tcm_vhost_get_fabric_name,
2017        .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
2018        .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
2019        .tpg_get_tag                    = tcm_vhost_get_tag,
2020        .tpg_get_default_depth          = tcm_vhost_get_default_depth,
2021        .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
2022        .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
2023        .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
2024        .tpg_check_demo_mode            = tcm_vhost_check_true,
2025        .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
2026        .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2027        .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2028        .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
2029        .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
2030        .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
2031        .release_cmd                    = tcm_vhost_release_cmd,
2032        .check_stop_free                = vhost_scsi_check_stop_free,
2033        .shutdown_session               = tcm_vhost_shutdown_session,
2034        .close_session                  = tcm_vhost_close_session,
2035        .sess_get_index                 = tcm_vhost_sess_get_index,
2036        .sess_get_initiator_sid         = NULL,
2037        .write_pending                  = tcm_vhost_write_pending,
2038        .write_pending_status           = tcm_vhost_write_pending_status,
2039        .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
2040        .get_task_tag                   = tcm_vhost_get_task_tag,
2041        .get_cmd_state                  = tcm_vhost_get_cmd_state,
2042        .queue_data_in                  = tcm_vhost_queue_data_in,
2043        .queue_status                   = tcm_vhost_queue_status,
2044        .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
2045        /*
2046         * Setup callers for generic logic in target_core_fabric_configfs.c
2047         */
2048        .fabric_make_wwn                = tcm_vhost_make_tport,
2049        .fabric_drop_wwn                = tcm_vhost_drop_tport,
2050        .fabric_make_tpg                = tcm_vhost_make_tpg,
2051        .fabric_drop_tpg                = tcm_vhost_drop_tpg,
2052        .fabric_post_link               = tcm_vhost_port_link,
2053        .fabric_pre_unlink              = tcm_vhost_port_unlink,
2054        .fabric_make_np                 = NULL,
2055        .fabric_drop_np                 = NULL,
2056        .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
2057        .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
2058};
2059
2060static int tcm_vhost_register_configfs(void)
2061{
2062        struct target_fabric_configfs *fabric;
2063        int ret;
2064
2065        pr_debug("TCM_VHOST fabric module %s on %s/%s"
2066                " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2067                utsname()->machine);
2068        /*
2069         * Register the top level struct config_item_type with TCM core
2070         */
2071        fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2072        if (IS_ERR(fabric)) {
2073                pr_err("target_fabric_configfs_init() failed\n");
2074                return PTR_ERR(fabric);
2075        }
2076        /*
2077         * Setup fabric->tf_ops from our local tcm_vhost_ops
2078         */
2079        fabric->tf_ops = tcm_vhost_ops;
2080        /*
2081         * Setup default attribute lists for various fabric->tf_cit_tmpl
2082         */
2083        TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2084        TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2085        TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
2086        TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
2087        TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
2088        TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2089        TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2090        TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2091        TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2092        /*
2093         * Register the fabric for use within TCM
2094         */
2095        ret = target_fabric_configfs_register(fabric);
2096        if (ret < 0) {
2097                pr_err("target_fabric_configfs_register() failed"
2098                                " for TCM_VHOST\n");
2099                return ret;
2100        }
2101        /*
2102         * Setup our local pointer to *fabric
2103         */
2104        tcm_vhost_fabric_configfs = fabric;
2105        pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2106        return 0;
2107}
2108
2109static void tcm_vhost_deregister_configfs(void)
2110{
2111        if (!tcm_vhost_fabric_configfs)
2112                return;
2113
2114        target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2115        tcm_vhost_fabric_configfs = NULL;
2116        pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2117}
2118
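/*
 * Descriptive note (added): module init brings up the private workqueue used
 * for submitting I/O into target core, registers the vhost-scsi misc device,
 * and finally registers the fabric with TCM configfs; each step is unwound
 * in reverse order on failure, and tcm_vhost_exit() tears the three down in
 * the opposite order of setup.
 */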
2119static int __init tcm_vhost_init(void)
2120{
2121        int ret = -ENOMEM;
2122        /*
2123         * Use our own dedicated workqueue for submitting I/O into
2124         * target core to avoid contention within system_wq.
2125         */
2126        tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2127        if (!tcm_vhost_workqueue)
2128                goto out;
2129
2130        ret = vhost_scsi_register();
2131        if (ret < 0)
2132                goto out_destroy_workqueue;
2133
2134        ret = tcm_vhost_register_configfs();
2135        if (ret < 0)
2136                goto out_vhost_scsi_deregister;
2137
2138        return 0;
2139
2140out_vhost_scsi_deregister:
2141        vhost_scsi_deregister();
2142out_destroy_workqueue:
2143        destroy_workqueue(tcm_vhost_workqueue);
2144out:
2145        return ret;
2146}
2147
2148static void tcm_vhost_exit(void)
2149{
2150        tcm_vhost_deregister_configfs();
2151        vhost_scsi_deregister();
2152        destroy_workqueue(tcm_vhost_workqueue);
2153}
2154
2155MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2156MODULE_ALIAS("tcm_vhost");
2157MODULE_LICENSE("GPL");
2158module_init(tcm_vhost_init);
2159module_exit(tcm_vhost_exit);
2160