linux/drivers/scsi/qla2xxx/qla_iocb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

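/*
 * Worked example for qla2x00_calc_iocbs_32(): the Command Type 2 IOCB
 * holds 3 DSDs and each Continuation Type 0 IOCB holds 7 more, so for
 * dsds == 17 the result is 1 + (17 - 3) / 7 = 3 IOCBs (3 + 7 + 7
 * descriptor slots).
 */
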
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}

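/*
 * Worked example for qla2x00_calc_iocbs_64(): the Command Type 3 IOCB
 * holds 2 DSDs and each Continuation Type 1 IOCB holds 5 more, so for
 * dsds == 10 the result is 1 + 8 / 5 (= 1, rounded down) + 1 for the
 * remainder, i.e. 3 IOCBs (2 + 5 + 3 descriptor slots used).
 */
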
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
                           CONTINUE_A64_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}

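/*
 * Ring-wrap walkthrough for the two helpers above (illustrative
 * numbers): with req->length == 128 and req->ring_index == 127, the
 * increment wraps ring_index to 0 and resets ring_ptr to the base of
 * the ring; in all other cases ring_ptr simply advances one IOCB slot.
 */
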
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

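/*
 * Usage sketch (hedged, illustrative only): a typical caller does
 *
 *	uint16_t fw_prot_opts;
 *	int nprot_sg = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
 *
 * and then passes fw_prot_opts and the protection-segment count on to
 * qla24xx_build_scsi_crc_2_iocbs() further below.
 */
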
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd32    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
        cur_dsd = cmd_pkt->dsd32;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd32(&cur_dsd, sg);
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
        cur_dsd = cmd_pkt->dsd64;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

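/*
 * Example of the DSD chaining above: for tot_dsds == 12 on a 64-bit
 * capable ISP, the first two segments land in the Command Type 3 IOCB
 * itself and qla2x00_prep_cont_type1_iocb() is then called twice, each
 * continuation IOCB carrying up to five more DSDs (2 + 5 + 5).
 */
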
/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
        uint32_t index, handle = req->current_outstanding_cmd;

        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        return handle;
        }

        return 0;
}

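/*
 * Example: handle 0 is reserved to mean "no handle", so the search
 * wraps from num_outstanding_cmds - 1 back to 1, never to 0. With
 * current_outstanding_cmd == 5 and slots 6 and 7 busy, the function
 * returns 8, the first free slot after the starting point.
 */
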
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Set up device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        handle = qla2xxx_get_next_handle(req);
        if (handle == 0)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

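/*
 * Ring-room math used in qla2x00_start_scsi() (illustrative numbers):
 * with req->length == 128, ring_index == 10 and the out pointer read
 * back as cnt == 5, the free count is req->length - (ring_index - cnt)
 * = 123 slots; if instead cnt == 20, the free count is cnt - ring_index
 * = 10 slots. Two slots are kept in reserve, hence the req_cnt + 2
 * check.
 */
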
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        wrt_reg_dword(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        wrt_reg_dword(req->req_q_in, req->ring_index);
                        rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
                        rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
                        rd_reg_dword_relaxed(&reg->isp24.req_q_in);
                } else {
                        wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;
        struct req_que *req = qpair->req;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = make_handle(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

        return (ret);
}

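/*
 * Locking note: __qla2x00_marker() expects the caller to already hold
 * qpair->qp_lock_ptr, while qla2x00_marker() above acquires and
 * releases it around the call; pick the variant that matches the
 * caller's locking context (see qla2x00_issue_marker() below).
 */
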
/*
 * qla2x00_issue_marker
 *
 * Issue a marker IOCB.
 * The caller may hold the hardware lock, as indicated by @ha_locked.
 * The lock might be released and then reacquired.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        struct dsd64 *cur_dsd = NULL, *next_dsd;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;
        struct qla_qpair *qpair = sp->qpair;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                qpair->counters.output_bytes += scsi_bufflen(cmd);
                qpair->counters.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                qpair->counters.input_bytes += scsi_bufflen(cmd);
                qpair->counters.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = sp->u.scmd.ct6_ctx;

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cmd_pkt->fcp_dsd.address);
                        cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
                } else {
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd++;
                }
                cur_dsd = next_dsd;
                while (avail_dsds) {
                        append_dsd64(&cur_dsd, cur_seg);
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}

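/*
 * Worked example (assuming QLA_DSDS_PER_IOCB is 37, as defined in
 * qla_def.h): for dsds == 100, qla24xx_calc_dsd_lists() returns
 * 100 / 37 = 2 plus one more list for the 26-descriptor remainder,
 * i.e. 3 DSD lists.
 */
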
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct qla_qpair *qpair = sp->qpair;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                qpair->counters.output_bytes += scsi_bufflen(cmd);
                qpair->counters.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                qpair->counters.input_bytes += scsi_bufflen(cmd);
                qpair->counters.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->dsd;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

struct fw_dif_context {
        __le32  ref_tag;
        __le16  app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

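/*
 * Example: for DIF Type 1 with scsi_get_lba(cmd) == 0x12345678, the
 * code above programs ref_tag = 0x12345678 (LBA-seeded), app_tag = 0
 * with its mask closed, and, when HBA error checking is enabled, opens
 * all four ref_tag_mask bytes (0xff) so the firmware validates every
 * byte of the ref tag.
 */
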
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}

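/*
 * Walkthrough of qla24xx_get_one_block_sg() with blk_sz == 512 and two
 * SG entries of 700 and 324 bytes: call 1 yields a 512-byte chunk
 * (partial = 0), call 2 the remaining 188 bytes (partial = 1), and
 * call 3 completes the interval with 324 bytes from the second entry
 * (partial = 0), illustrating how protection intervals may straddle
 * SG-element boundaries.
 */
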
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        struct dsd64 *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                put_unaligned_le64(sle_dma, &cur_dsd->address);
                cur_dsd->length = cpu_to_le32(sle_dma_len);
                cur_dsd++;
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        struct dsd64 *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
        struct scatterlist *sg, *sgl;
        struct crc_context *difctx = NULL;
        struct scsi_qla_host *vha;
        uint dsd_list_len;
        uint avail_dsds = 0;
        uint used_dsds = tot_dsds;
        bool dif_local_dma_alloc = false;
        bool direction_to_device = false;
        int i;

        if (sp) {
                struct scsi_cmnd *cmd = GET_CMD_SP(sp);

                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
                difctx = sp->u.scmd.crc_ctx;
                direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
                    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
                    __func__, cmd, difctx, sp);
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
                difctx = tc->ctx;
                direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
            "%s: enter (write=%u)\n", __func__, direction_to_device);

        /* if initiator doing write or target doing read */
        if (direction_to_device) {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        u64 sle_phys = sg_phys(sg);

                        /* If SGE addr + len flips bits in upper 32-bits */
                        if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
                                    "%s: page boundary crossing (phys=%llx len=%x)\n",
                                    __func__, sle_phys, sg->length);

                                if (difctx) {
                                        ha->dif_bundle_crossed_pages++;
                                        dif_local_dma_alloc = true;
                                } else {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe022,
                                            "%s: difctx pointer is NULL\n",
                                            __func__);
                                }
                                break;
                        }
                }
                ha->dif_bundle_writes++;
        } else {
                ha->dif_bundle_reads++;
        }

        if (ql2xdifbundlinginternalbuffers)
                dif_local_dma_alloc = direction_to_device;

        if (dif_local_dma_alloc) {
                u32 track_difbundl_buf = 0;
                u32 ldma_sg_len = 0;
                u8 ldma_needed = 1;

                difctx->no_dif_bundl = 0;
                difctx->dif_bundl_len = 0;

                /* Track DSD buffers */
                INIT_LIST_HEAD(&difctx->ldif_dsd_list);
                /* Track local DMA buffers */
                INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

                for_each_sg(sgl, sg, tot_dsds, i) {
                        u32 sglen = sg_dma_len(sg);

                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
                            "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
                            __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
                            difctx->dif_bundl_len, ldma_needed);

                        while (sglen) {
                                u32 xfrlen = 0;

                                if (ldma_needed) {
                                        /*
                                         * Allocate list item to store
                                         * the DMA buffers
                                         */
                                        dsd_ptr = kzalloc(sizeof(*dsd_ptr),
                                            GFP_ATOMIC);
                                        if (!dsd_ptr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc dsd_ptr\n",
                                                    __func__);
                                                return 1;
                                        }
                                        ha->dif_bundle_kallocs++;

                                        /* allocate dma buffer */
                                        dsd_ptr->dsd_addr = dma_pool_alloc
                                                (ha->dif_bundl_pool, GFP_ATOMIC,
                                                 &dsd_ptr->dsd_list_dma);
                                        if (!dsd_ptr->dsd_addr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc ->dsd_addr\n",
                                                    __func__);
                                                /*
                                                 * need to cleanup only this
                                                 * dsd_ptr rest will be done
                                                 * by sp_free_dma()
                                                 */
                                                kfree(dsd_ptr);
                                                ha->dif_bundle_kallocs--;
                                                return 1;
                                        }
                                        ha->dif_bundle_dma_allocs++;
                                        ldma_needed = 0;
                                        difctx->no_dif_bundl++;
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dma_hndl_list);
                                }

                                /* xfrlen is min of dma pool size and sglen */
                                xfrlen = (sglen >
                                   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
                                    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
                                    sglen;

                                /* replace with local allocated dma buffer */
                                sg_pcopy_to_buffer(sgl, sg_nents(sgl),
                                    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
                                    difctx->dif_bundl_len);
                                difctx->dif_bundl_len += xfrlen;
                                sglen -= xfrlen;
                                ldma_sg_len += xfrlen;
                                if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
                                    sg_is_last(sg)) {
                                        ldma_needed = 1;
                                        ldma_sg_len = 0;
                                }
                        }
                }

                track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
                    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
                    difctx->dif_bundl_len, difctx->no_dif_bundl,
                    track_difbundl_buf);

                if (sp)
                        sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
                else
                        tc->prot_flags = DIF_BUNDL_DMA_VALID;

                list_for_each_entry_safe(dif_dsd, nxt_dsd,
                    &difctx->ldif_dma_hndl_list, list) {
                        u32 sglen = (difctx->dif_bundl_len >
                            DIF_BUNDLING_DMA_POOL_SIZE) ?
                            DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

                        BUG_ON(track_difbundl_buf == 0);

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
                                    0xe024,
                                    "%s: adding continuation iocb's\n",
                                    __func__);
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc dsd_ptr\n",
                                            __func__);
                                        return 1;
                                }
                                ha->dif_bundle_kallocs++;

                                difctx->no_ldif_dsd++;
                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc ->dsd_addr\n",
                                            __func__);
                                        /*
                                         * need to cleanup only this dsd_ptr
                                         * rest will be done by sp_free_dma()
                                         */
                                        kfree(dsd_ptr);
                                        ha->dif_bundle_kallocs--;
                                        return 1;
                                }
                                ha->dif_bundle_dma_allocs++;

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        put_unaligned_le64(dif_dsd->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(sglen);
                        cur_dsd++;
                        avail_dsds--;
                        difctx->dif_bundl_len -= sglen;
                        track_difbundl_buf--;
                }

                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
                    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
                    difctx->no_ldif_dsd, difctx->no_dif_bundl);
        } else {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe027,
                                            "%s: failed alloc dsd_dma...\n",
                                            __func__);
                                        return 1;
                                }

                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        /* need to cleanup only this dsd_ptr */
                                        /* rest will be done by sp_free_dma() */
                                        kfree(dsd_ptr);
                                        return 1;
                                }

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        append_dsd64(&cur_dsd, sg);
                        avail_dsds--;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

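/*
 * Summary of the two paths above (editorial note): when a protection
 * SG element would cross a 4GB boundary (the MSD() check) or
 * ql2xdifbundlinginternalbuffers forces it for writes, the protection
 * data is staged through locally allocated DMA bundles from
 * ha->dif_bundl_pool; otherwise the protection scatterlist is chained
 * directly into DSD lists, much as in qla24xx_walk_and_build_sglist().
 */
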
1378/**
1379 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1380 *                                                      Type 6 IOCB types.
1381 *
1382 * @sp: SRB command to process
1383 * @cmd_pkt: Command type 3 IOCB
1384 * @tot_dsds: Total number of segments to transfer
1385 * @tot_prot_dsds: Total number of segments with protection information
1386 * @fw_prot_opts: Protection options to be passed to firmware
1387 */
1388static inline int
1389qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1390    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1391{
1392        struct dsd64            *cur_dsd;
1393        __be32                  *fcp_dl;
1394        scsi_qla_host_t         *vha;
1395        struct scsi_cmnd        *cmd;
1396        uint32_t                total_bytes = 0;
1397        uint32_t                data_bytes;
1398        uint32_t                dif_bytes;
1399        uint8_t                 bundling = 1;
1400        uint16_t                blk_size;
1401        struct crc_context      *crc_ctx_pkt = NULL;
1402        struct qla_hw_data      *ha;
1403        uint8_t                 additional_fcpcdb_len;
1404        uint16_t                fcp_cmnd_len;
1405        struct fcp_cmnd         *fcp_cmnd;
1406        dma_addr_t              crc_ctx_dma;
1407
1408        cmd = GET_CMD_SP(sp);
1409
1410        /* Update entry type to indicate Command Type CRC_2 IOCB */
1411        put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1412
1413        vha = sp->vha;
1414        ha = vha->hw;
1415
1416        /* No data transfer */
1417        data_bytes = scsi_bufflen(cmd);
1418        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1419                cmd_pkt->byte_count = cpu_to_le32(0);
1420                return QLA_SUCCESS;
1421        }
1422
1423        cmd_pkt->vp_index = sp->vha->vp_idx;
1424
1425        /* Set transfer direction */
1426        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1427                cmd_pkt->control_flags =
1428                    cpu_to_le16(CF_WRITE_DATA);
1429        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1430                cmd_pkt->control_flags =
1431                    cpu_to_le16(CF_READ_DATA);
1432        }
1433
1434        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1435            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1436            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1437            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1438                bundling = 0;
1439
1440        /* Allocate CRC context from global pool */
1441        crc_ctx_pkt = sp->u.scmd.crc_ctx =
1442            dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1443
1444        if (!crc_ctx_pkt)
1445                goto crc_queuing_error;
1446
1447        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1448
1449        sp->flags |= SRB_CRC_CTX_DMA_VALID;
1450
1451        /* Set handle */
1452        crc_ctx_pkt->handle = cmd_pkt->handle;
1453
1454        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1455
1456        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1457            &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1458
1459        put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1460        cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
1461
1462        /* Determine SCSI command length -- align to 4 byte boundary */
1463        if (cmd->cmd_len > 16) {
1464                additional_fcpcdb_len = cmd->cmd_len - 16;
1465                if ((cmd->cmd_len % 4) != 0) {
1466                        /* SCSI cmd > 16 bytes must be multiple of 4 */
1467                        goto crc_queuing_error;
1468                }
1469                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1470        } else {
1471                additional_fcpcdb_len = 0;
1472                fcp_cmnd_len = 12 + 16 + 4;
1473        }
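        /*
         * fcp_cmnd_len accounting: 8-byte LUN plus four single-byte task
         * fields (12 total), then the CDB, then the 4-byte FCP_DL that is
         * appended after the CDB below.
         */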
1474
1475        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1476
1477        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1478        if (cmd->sc_data_direction == DMA_TO_DEVICE)
1479                fcp_cmnd->additional_cdb_len |= 1;
1480        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1481                fcp_cmnd->additional_cdb_len |= 2;
1482
1483        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1484        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1485        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1486        put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1487                           &cmd_pkt->fcp_cmnd_dseg_address);
1488        fcp_cmnd->task_management = 0;
1489        fcp_cmnd->task_attribute = TSK_SIMPLE;
1490
1491        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1492
1493        /* Compute dif len and adjust data len to include protection */
1494        dif_bytes = 0;
1495        blk_size = cmd->device->sector_size;
1496        dif_bytes = (data_bytes / blk_size) * 8;
1497
1498        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1499        case SCSI_PROT_READ_INSERT:
1500        case SCSI_PROT_WRITE_STRIP:
1501                total_bytes = data_bytes;
1502                data_bytes += dif_bytes;
1503                break;
1504
1505        case SCSI_PROT_READ_STRIP:
1506        case SCSI_PROT_WRITE_INSERT:
1507        case SCSI_PROT_READ_PASS:
1508        case SCSI_PROT_WRITE_PASS:
1509                total_bytes = data_bytes + dif_bytes;
1510                break;
1511        default:
1512                BUG();
1513        }
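        /*
         * Worked example: a 64 KiB transfer on 512-byte sectors spans 128
         * blocks, so dif_bytes = 128 * 8 = 1024.  When protection travels
         * on the wire (READ_STRIP/WRITE_INSERT and the two PASS ops),
         * total_bytes = 65536 + 1024 = 66560.  For READ_INSERT/WRITE_STRIP
         * the wire carries data only (total_bytes = 65536) and the host
         * DMA length grows by dif_bytes instead.
         */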
1514
1515        if (!qla2x00_hba_err_chk_enabled(sp))
1516                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1517        /* HBA error checking enabled */
1518        else if (IS_PI_UNINIT_CAPABLE(ha)) {
1519                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1520                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1521                        SCSI_PROT_DIF_TYPE2))
1522                        fw_prot_opts |= BIT_10;
1523                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1524                    SCSI_PROT_DIF_TYPE3)
1525                        fw_prot_opts |= BIT_11;
1526        }
1527
1528        if (!bundling) {
1529                cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1530        } else {
1531                /*
1532                 * Configure bundling if we need to fetch interleaved
1533                 * protection data with separate PCI accesses
1534                 */
1535                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1536                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1537                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1538                                                        tot_prot_dsds);
1539                cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1540        }
1541
1542        /* Finish the common fields of CRC pkt */
1543        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1544        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1545        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1546        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1547        /* Fibre channel byte count */
1548        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1549        fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1550            additional_fcpcdb_len);
1551        *fcp_dl = htonl(total_bytes);
1552
1553        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1554                cmd_pkt->byte_count = cpu_to_le32(0);
1555                return QLA_SUCCESS;
1556        }
1557        /* Walk the data segments */
1558
1559        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1560
1561        if (!bundling && tot_prot_dsds) {
1562                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1563                        cur_dsd, tot_dsds, NULL))
1564                        goto crc_queuing_error;
1565        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1566                        (tot_dsds - tot_prot_dsds), NULL))
1567                goto crc_queuing_error;
1568
1569        if (bundling && tot_prot_dsds) {
1570                /* Walk the DIF segments */
1571                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1572                cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1573                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1574                                tot_prot_dsds, NULL))
1575                        goto crc_queuing_error;
1576        }
1577        return QLA_SUCCESS;
1578
1579crc_queuing_error:
1580        /* Cleanup will be performed by the caller */
1581
1582        return QLA_FUNCTION_FAILED;
1583}
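
FCP_DL is not a named member of the fcp_cmnd structure; it is the 4-byte
big-endian burst length stored immediately after the (possibly extended)
CDB, which is what the fcp_dl pointer arithmetic above computes.  A
minimal sketch of that placement; set_fcp_dl() is a hypothetical helper,
and the 16-byte base CDB size follows the code above:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Store the FCP_DL burst length right after the CDB (sketch). */
static void set_fcp_dl(uint8_t *cdb, uint8_t additional_cdb_len,
                       uint32_t total_bytes)
{
        put_unaligned_be32(total_bytes, cdb + 16 + additional_cdb_len);
}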
1584
1585/**
1586 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1587 * @sp: command to send to the ISP
1588 *
1589 * Returns non-zero if a failure occurred, else zero.
1590 */
1591int
1592qla24xx_start_scsi(srb_t *sp)
1593{
1594        int             nseg;
1595        unsigned long   flags;
1596        uint32_t        *clr_ptr;
1597        uint32_t        handle;
1598        struct cmd_type_7 *cmd_pkt;
1599        uint16_t        cnt;
1600        uint16_t        req_cnt;
1601        uint16_t        tot_dsds;
1602        struct req_que *req = NULL;
1603        struct rsp_que *rsp;
1604        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1605        struct scsi_qla_host *vha = sp->vha;
1606        struct qla_hw_data *ha = vha->hw;
1607
1608        /* Setup device pointers. */
1609        req = vha->req;
1610        rsp = req->rsp;
1611
1612        /* So we know we haven't dma_map'ed anything yet */
1613        tot_dsds = 0;
1614
1615        /* Send marker if required */
1616        if (vha->marker_needed != 0) {
1617                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1618                    QLA_SUCCESS)
1619                        return QLA_FUNCTION_FAILED;
1620                vha->marker_needed = 0;
1621        }
1622
1623        /* Acquire ring specific lock */
1624        spin_lock_irqsave(&ha->hardware_lock, flags);
1625
1626        handle = qla2xxx_get_next_handle(req);
1627        if (handle == 0)
1628                goto queuing_error;
1629
1630        /* Map the sg table so we have an accurate count of sg entries needed */
1631        if (scsi_sg_count(cmd)) {
1632                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1633                    scsi_sg_count(cmd), cmd->sc_data_direction);
1634                if (unlikely(!nseg))
1635                        goto queuing_error;
1636        } else
1637                nseg = 0;
1638
1639        tot_dsds = nseg;
1640        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1641
1642        sp->iores.res_type = RESOURCE_INI;
1643        sp->iores.iocb_cnt = req_cnt;
1644        if (qla_get_iocbs(sp->qpair, &sp->iores))
1645                goto queuing_error;
1646
1647        if (req->cnt < (req_cnt + 2)) {
1648                if (IS_SHADOW_REG_CAPABLE(ha)) {
1649                        cnt = *req->out_ptr;
1650                } else {
1651                        cnt = rd_reg_dword_relaxed(req->req_q_out);
1652                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1653                                goto queuing_error;
1654                }
1655
1656                if (req->ring_index < cnt)
1657                        req->cnt = cnt - req->ring_index;
1658                else
1659                        req->cnt = req->length -
1660                                (req->ring_index - cnt);
1661                if (req->cnt < (req_cnt + 2))
1662                        goto queuing_error;
1663        }
1664
1665        /* Build command packet. */
1666        req->current_outstanding_cmd = handle;
1667        req->outstanding_cmds[handle] = sp;
1668        sp->handle = handle;
1669        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1670        req->cnt -= req_cnt;
1671
1672        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1673        cmd_pkt->handle = make_handle(req->id, handle);
1674
1675        /* Zero out remaining portion of packet. */
1676        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1677        clr_ptr = (uint32_t *)cmd_pkt + 2;
1678        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1679        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1680
1681        /* Set NPORT-ID and LUN number*/
1682        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1683        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1684        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1685        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1686        cmd_pkt->vp_index = sp->vha->vp_idx;
1687
1688        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1689        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1690
1691        cmd_pkt->task = TSK_SIMPLE;
1692
1693        /* Load SCSI command packet. */
1694        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1695        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1696
1697        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1698
1699        /* Build IOCB segments */
1700        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1701
1702        /* Set total data segment count. */
1703        cmd_pkt->entry_count = (uint8_t)req_cnt;
1704        wmb();
1705        /* Adjust ring index. */
1706        req->ring_index++;
1707        if (req->ring_index == req->length) {
1708                req->ring_index = 0;
1709                req->ring_ptr = req->ring;
1710        } else
1711                req->ring_ptr++;
1712
1713        sp->qpair->cmd_cnt++;
1714        sp->flags |= SRB_DMA_VALID;
1715
1716        /* Set chip new ring index. */
1717        wrt_reg_dword(req->req_q_in, req->ring_index);
1718
1719        /* Manage unprocessed RIO/ZIO commands in response queue. */
1720        if (vha->flags.process_response_queue &&
1721            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1722                qla24xx_process_response_queue(vha, rsp);
1723
1724        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1725        return QLA_SUCCESS;
1726
1727queuing_error:
1728        if (tot_dsds)
1729                scsi_dma_unmap(cmd);
1730
1731        qla_put_iocbs(sp->qpair, &sp->iores);
1732        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1733
1734        return QLA_FUNCTION_FAILED;
1735}
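
Every start path above refreshes req->cnt from the consumer index the
ISP last reported (shadow register or req_q_out) with the same circular
arithmetic, then insists on req_cnt + 2 free slots so the producer can
never catch the consumer.  A standalone sketch of that computation;
ring_free() is a hypothetical name:

#include <stdint.h>

/* Free slots between producer (ring_index) and consumer (out). */
static uint16_t ring_free(uint16_t ring_index, uint16_t out,
                          uint16_t length)
{
        if (ring_index < out)
                return out - ring_index;
        return length - (ring_index - out);
}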
1736
1737/**
1738 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1739 * @sp: command to send to the ISP
1740 *
1741 * Returns non-zero if a failure occurred, else zero.
1742 */
1743int
1744qla24xx_dif_start_scsi(srb_t *sp)
1745{
1746        int                     nseg;
1747        unsigned long           flags;
1748        uint32_t                *clr_ptr;
1749        uint32_t                handle;
1750        uint16_t                cnt;
1751        uint16_t                req_cnt = 0;
1752        uint16_t                tot_dsds;
1753        uint16_t                tot_prot_dsds;
1754        uint16_t                fw_prot_opts = 0;
1755        struct req_que          *req = NULL;
1756        struct rsp_que          *rsp = NULL;
1757        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1758        struct scsi_qla_host    *vha = sp->vha;
1759        struct qla_hw_data      *ha = vha->hw;
1760        struct cmd_type_crc_2   *cmd_pkt;
1761        uint32_t                status = 0;
1762
1763#define QDSS_GOT_Q_SPACE        BIT_0
1764
1765        /* Only process protection-enabled I/O or CDBs longer than 16 bytes here */
1766        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1767                if (cmd->cmd_len <= 16)
1768                        return qla24xx_start_scsi(sp);
1769        }
1770
1771        /* Setup device pointers. */
1772        req = vha->req;
1773        rsp = req->rsp;
1774
1775        /* So we know we haven't dma_map'ed anything yet */
1776        tot_dsds = 0;
1777
1778        /* Send marker if required */
1779        if (vha->marker_needed != 0) {
1780                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1781                    QLA_SUCCESS)
1782                        return QLA_FUNCTION_FAILED;
1783                vha->marker_needed = 0;
1784        }
1785
1786        /* Acquire ring specific lock */
1787        spin_lock_irqsave(&ha->hardware_lock, flags);
1788
1789        handle = qla2xxx_get_next_handle(req);
1790        if (handle == 0)
1791                goto queuing_error;
1792
1793        /* Compute number of required data segments */
1794        /* Map the sg table so we have an accurate count of sg entries needed */
1795        if (scsi_sg_count(cmd)) {
1796                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1797                    scsi_sg_count(cmd), cmd->sc_data_direction);
1798                if (unlikely(!nseg))
1799                        goto queuing_error;
1800                else
1801                        sp->flags |= SRB_DMA_VALID;
1802
1803                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1804                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1805                        struct qla2_sgx sgx;
1806                        uint32_t        partial;
1807
1808                        memset(&sgx, 0, sizeof(struct qla2_sgx));
1809                        sgx.tot_bytes = scsi_bufflen(cmd);
1810                        sgx.cur_sg = scsi_sglist(cmd);
1811                        sgx.sp = sp;
1812
1813                        nseg = 0;
1814                        while (qla24xx_get_one_block_sg(
1815                            cmd->device->sector_size, &sgx, &partial))
1816                                nseg++;
1817                }
1818        } else
1819                nseg = 0;
1820
1821        /* number of required data segments */
1822        tot_dsds = nseg;
1823
1824        /* Compute number of required protection segments */
1825        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1826                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1827                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1828                if (unlikely(!nseg))
1829                        goto queuing_error;
1830                else
1831                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
1832
1833                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1834                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1835                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1836                }
1837        } else {
1838                nseg = 0;
1839        }
1840
1841        req_cnt = 1;
1842        /* Total Data and protection sg segment(s) */
1843        tot_prot_dsds = nseg;
1844        tot_dsds += nseg;
1845
1846        sp->iores.res_type = RESOURCE_INI;
1847        sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1848        if (qla_get_iocbs(sp->qpair, &sp->iores))
1849                goto queuing_error;
1850
1851        if (req->cnt < (req_cnt + 2)) {
1852                if (IS_SHADOW_REG_CAPABLE(ha)) {
1853                        cnt = *req->out_ptr;
1854                } else {
1855                        cnt = rd_reg_dword_relaxed(req->req_q_out);
1856                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1857                                goto queuing_error;
1858                }
1859                if (req->ring_index < cnt)
1860                        req->cnt = cnt - req->ring_index;
1861                else
1862                        req->cnt = req->length -
1863                                (req->ring_index - cnt);
1864                if (req->cnt < (req_cnt + 2))
1865                        goto queuing_error;
1866        }
1867
1868        status |= QDSS_GOT_Q_SPACE;
1869
1870        /* Build header part of command packet (excluding the OPCODE). */
1871        req->current_outstanding_cmd = handle;
1872        req->outstanding_cmds[handle] = sp;
1873        sp->handle = handle;
1874        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1875        req->cnt -= req_cnt;
1876
1877        /* Fill-in common area */
1878        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1879        cmd_pkt->handle = make_handle(req->id, handle);
1880
1881        clr_ptr = (uint32_t *)cmd_pkt + 2;
1882        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1883
1884        /* Set NPORT-ID and LUN number*/
1885        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1886        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1887        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1888        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1889
1890        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1891        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1892
1893        /* Total Data and protection segment(s) */
1894        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1895
1896        /* Build IOCB segments and adjust for data protection segments */
1897        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1898            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1899                QLA_SUCCESS)
1900                goto queuing_error;
1901
1902        cmd_pkt->entry_count = (uint8_t)req_cnt;
1903        /* Specify response queue number where completion should happen */
1904        cmd_pkt->entry_status = (uint8_t) rsp->id;
1905        cmd_pkt->timeout = cpu_to_le16(0);
1906        wmb();
1907
1908        /* Adjust ring index. */
1909        req->ring_index++;
1910        if (req->ring_index == req->length) {
1911                req->ring_index = 0;
1912                req->ring_ptr = req->ring;
1913        } else
1914                req->ring_ptr++;
1915
1916        sp->qpair->cmd_cnt++;
1917        /* Set chip new ring index. */
1918        wrt_reg_dword(req->req_q_in, req->ring_index);
1919
1920        /* Manage unprocessed RIO/ZIO commands in response queue. */
1921        if (vha->flags.process_response_queue &&
1922            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1923                qla24xx_process_response_queue(vha, rsp);
1924
1925        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1926
1927        return QLA_SUCCESS;
1928
1929queuing_error:
1930        if (status & QDSS_GOT_Q_SPACE) {
1931                req->outstanding_cmds[handle] = NULL;
1932                req->cnt += req_cnt;
1933        }
1934        /* Cleanup will be performed by the caller (queuecommand) */
1935
1936        qla_put_iocbs(sp->qpair, &sp->iores);
1937        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1938
1939        return QLA_FUNCTION_FAILED;
1940}
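
For READ_INSERT/WRITE_STRIP the protection data lives only in host
memory, so each data DSD handed to the firmware may cover at most one
logical block and must not cross a scatterlist element; the
qla24xx_get_one_block_sg() loops above recount nseg on that basis.  A
user-space sketch of the effective count, using a stand-in segment type:

#include <stdint.h>
#include <stddef.h>

struct seg { uint32_t len; };   /* stand-in for one sg element */

/* One descriptor per (block x element) fragment, as in the walk above. */
static unsigned int one_block_dsds(const struct seg *sg, size_t nelem,
                                   uint32_t blk)
{
        unsigned int dsds = 0;
        uint32_t room = blk;    /* bytes left in the current block */
        size_t i;

        for (i = 0; i < nelem; i++) {
                uint32_t left = sg[i].len;

                while (left) {
                        uint32_t chunk = left < room ? left : room;

                        dsds++;
                        left -= chunk;
                        room -= chunk;
                        if (!room)
                                room = blk;
                }
        }
        return dsds;
}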
1941
1942/**
1943 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1944 * @sp: command to send to the ISP
1945 *
1946 * Returns non-zero if a failure occurred, else zero.
1947 */
1948static int
1949qla2xxx_start_scsi_mq(srb_t *sp)
1950{
1951        int             nseg;
1952        unsigned long   flags;
1953        uint32_t        *clr_ptr;
1954        uint32_t        handle;
1955        struct cmd_type_7 *cmd_pkt;
1956        uint16_t        cnt;
1957        uint16_t        req_cnt;
1958        uint16_t        tot_dsds;
1959        struct req_que *req = NULL;
1960        struct rsp_que *rsp;
1961        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1962        struct scsi_qla_host *vha = sp->fcport->vha;
1963        struct qla_hw_data *ha = vha->hw;
1964        struct qla_qpair *qpair = sp->qpair;
1965
1966        /* Acquire qpair specific lock */
1967        spin_lock_irqsave(&qpair->qp_lock, flags);
1968
1969        /* Setup qpair pointers */
1970        req = qpair->req;
1971        rsp = qpair->rsp;
1972
1973        /* So we know we haven't dma_map'ed anything yet */
1974        tot_dsds = 0;
1975
1976        /* Send marker if required */
1977        if (vha->marker_needed != 0) {
1978                if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1979                    QLA_SUCCESS) {
1980                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1981                        return QLA_FUNCTION_FAILED;
1982                }
1983                vha->marker_needed = 0;
1984        }
1985
1986        handle = qla2xxx_get_next_handle(req);
1987        if (handle == 0)
1988                goto queuing_error;
1989
1990        /* Map the sg table so we have an accurate count of sg entries needed */
1991        if (scsi_sg_count(cmd)) {
1992                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1993                    scsi_sg_count(cmd), cmd->sc_data_direction);
1994                if (unlikely(!nseg))
1995                        goto queuing_error;
1996        } else
1997                nseg = 0;
1998
1999        tot_dsds = nseg;
2000        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2001
2002        sp->iores.res_type = RESOURCE_INI;
2003        sp->iores.iocb_cnt = req_cnt;
2004        if (qla_get_iocbs(sp->qpair, &sp->iores))
2005                goto queuing_error;
2006
2007        if (req->cnt < (req_cnt + 2)) {
2008                if (IS_SHADOW_REG_CAPABLE(ha)) {
2009                        cnt = *req->out_ptr;
2010                } else {
2011                        cnt = rd_reg_dword_relaxed(req->req_q_out);
2012                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
2013                                goto queuing_error;
2014                }
2015
2016                if (req->ring_index < cnt)
2017                        req->cnt = cnt - req->ring_index;
2018                else
2019                        req->cnt = req->length -
2020                                (req->ring_index - cnt);
2021                if (req->cnt < (req_cnt + 2))
2022                        goto queuing_error;
2023        }
2024
2025        /* Build command packet. */
2026        req->current_outstanding_cmd = handle;
2027        req->outstanding_cmds[handle] = sp;
2028        sp->handle = handle;
2029        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2030        req->cnt -= req_cnt;
2031
2032        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2033        cmd_pkt->handle = make_handle(req->id, handle);
2034
2035        /* Zero out remaining portion of packet. */
2036        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2037        clr_ptr = (uint32_t *)cmd_pkt + 2;
2038        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2039        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2040
2041        /* Set NPORT-ID and LUN number*/
2042        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2043        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2044        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2045        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2046        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2047
2048        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2049        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2050
2051        cmd_pkt->task = TSK_SIMPLE;
2052
2053        /* Load SCSI command packet. */
2054        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2055        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2056
2057        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2058
2059        /* Build IOCB segments */
2060        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2061
2062        /* Set total data segment count. */
2063        cmd_pkt->entry_count = (uint8_t)req_cnt;
2064        wmb();
2065        /* Adjust ring index. */
2066        req->ring_index++;
2067        if (req->ring_index == req->length) {
2068                req->ring_index = 0;
2069                req->ring_ptr = req->ring;
2070        } else
2071                req->ring_ptr++;
2072
2073        sp->qpair->cmd_cnt++;
2074        sp->flags |= SRB_DMA_VALID;
2075
2076        /* Set chip new ring index. */
2077        wrt_reg_dword(req->req_q_in, req->ring_index);
2078
2079        /* Manage unprocessed RIO/ZIO commands in response queue. */
2080        if (vha->flags.process_response_queue &&
2081            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2082                qla24xx_process_response_queue(vha, rsp);
2083
2084        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2085        return QLA_SUCCESS;
2086
2087queuing_error:
2088        if (tot_dsds)
2089                scsi_dma_unmap(cmd);
2090
2091        qla_put_iocbs(sp->qpair, &sp->iores);
2092        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2093
2094        return QLA_FUNCTION_FAILED;
2095}
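
Each submission stamps the IOCB with a handle combining the request
queue id and the outstanding-command slot, which is how the completion
path locates outstanding_cmds[] again.  A sketch of the packing, on the
assumption that make_handle() keeps the queue id in the high 16 bits:

#include <stdint.h>

/* Assumed packing: queue id high, slot low (sketch, not the driver). */
static uint32_t make_handle_sketch(uint16_t que_id, uint16_t slot)
{
        return ((uint32_t)que_id << 16) | slot;
}

static void unpack_handle(uint32_t handle, uint16_t *que_id,
                          uint16_t *slot)
{
        *que_id = handle >> 16;
        *slot = handle & 0xffff;
}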
2096
2097
2098/**
2099 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2100 * @sp: command to send to the ISP
2101 *
2102 * Returns non-zero if a failure occurred, else zero.
2103 */
2104int
2105qla2xxx_dif_start_scsi_mq(srb_t *sp)
2106{
2107        int                     nseg;
2108        unsigned long           flags;
2109        uint32_t                *clr_ptr;
2110        uint32_t                handle;
2111        uint16_t                cnt;
2112        uint16_t                req_cnt = 0;
2113        uint16_t                tot_dsds;
2114        uint16_t                tot_prot_dsds;
2115        uint16_t                fw_prot_opts = 0;
2116        struct req_que          *req = NULL;
2117        struct rsp_que          *rsp = NULL;
2118        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
2119        struct scsi_qla_host    *vha = sp->fcport->vha;
2120        struct qla_hw_data      *ha = vha->hw;
2121        struct cmd_type_crc_2   *cmd_pkt;
2122        uint32_t                status = 0;
2123        struct qla_qpair        *qpair = sp->qpair;
2124
2125#define QDSS_GOT_Q_SPACE        BIT_0
2126
2127        /* Check for host side state */
2128        if (!qpair->online) {
2129                cmd->result = DID_NO_CONNECT << 16;
2130                return QLA_INTERFACE_ERROR;
2131        }
2132
2133        if (!qpair->difdix_supported &&
2134                scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2135                cmd->result = DID_NO_CONNECT << 16;
2136                return QLA_INTERFACE_ERROR;
2137        }
2138
2139        /* Only process protection-enabled I/O or CDBs longer than 16 bytes here */
2140        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2141                if (cmd->cmd_len <= 16)
2142                        return qla2xxx_start_scsi_mq(sp);
2143        }
2144
2145        spin_lock_irqsave(&qpair->qp_lock, flags);
2146
2147        /* Setup qpair pointers */
2148        rsp = qpair->rsp;
2149        req = qpair->req;
2150
2151        /* So we know we haven't dma_map'ed anything yet */
2152        tot_dsds = 0;
2153
2154        /* Send marker if required */
2155        if (vha->marker_needed != 0) {
2156                if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2157                    QLA_SUCCESS) {
2158                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2159                        return QLA_FUNCTION_FAILED;
2160                }
2161                vha->marker_needed = 0;
2162        }
2163
2164        handle = qla2xxx_get_next_handle(req);
2165        if (handle == 0)
2166                goto queuing_error;
2167
2168        /* Compute number of required data segments */
2169        /* Map the sg table so we have an accurate count of sg entries needed */
2170        if (scsi_sg_count(cmd)) {
2171                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2172                    scsi_sg_count(cmd), cmd->sc_data_direction);
2173                if (unlikely(!nseg))
2174                        goto queuing_error;
2175                else
2176                        sp->flags |= SRB_DMA_VALID;
2177
2178                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2179                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2180                        struct qla2_sgx sgx;
2181                        uint32_t        partial;
2182
2183                        memset(&sgx, 0, sizeof(struct qla2_sgx));
2184                        sgx.tot_bytes = scsi_bufflen(cmd);
2185                        sgx.cur_sg = scsi_sglist(cmd);
2186                        sgx.sp = sp;
2187
2188                        nseg = 0;
2189                        while (qla24xx_get_one_block_sg(
2190                            cmd->device->sector_size, &sgx, &partial))
2191                                nseg++;
2192                }
2193        } else
2194                nseg = 0;
2195
2196        /* number of required data segments */
2197        tot_dsds = nseg;
2198
2199        /* Compute number of required protection segments */
2200        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2201                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2202                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2203                if (unlikely(!nseg))
2204                        goto queuing_error;
2205                else
2206                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
2207
2208                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2209                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2210                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2211                }
2212        } else {
2213                nseg = 0;
2214        }
2215
2216        req_cnt = 1;
2217        /* Total Data and protection sg segment(s) */
2218        tot_prot_dsds = nseg;
2219        tot_dsds += nseg;
2220
2221        sp->iores.res_type = RESOURCE_INI;
2222        sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2223        if (qla_get_iocbs(sp->qpair, &sp->iores))
2224                goto queuing_error;
2225
2226        if (req->cnt < (req_cnt + 2)) {
2227                if (IS_SHADOW_REG_CAPABLE(ha)) {
2228                        cnt = *req->out_ptr;
2229                } else {
2230                        cnt = rd_reg_dword_relaxed(req->req_q_out);
2231                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
2232                                goto queuing_error;
2233                }
2234
2235                if (req->ring_index < cnt)
2236                        req->cnt = cnt - req->ring_index;
2237                else
2238                        req->cnt = req->length -
2239                                (req->ring_index - cnt);
2240                if (req->cnt < (req_cnt + 2))
2241                        goto queuing_error;
2242        }
2243
2244        status |= QDSS_GOT_Q_SPACE;
2245
2246        /* Build header part of command packet (excluding the OPCODE). */
2247        req->current_outstanding_cmd = handle;
2248        req->outstanding_cmds[handle] = sp;
2249        sp->handle = handle;
2250        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2251        req->cnt -= req_cnt;
2252
2253        /* Fill-in common area */
2254        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2255        cmd_pkt->handle = make_handle(req->id, handle);
2256
2257        clr_ptr = (uint32_t *)cmd_pkt + 2;
2258        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2259
2260        /* Set NPORT-ID and LUN number*/
2261        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2262        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2263        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2264        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2265
2266        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2267        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2268
2269        /* Total Data and protection segment(s) */
2270        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2271
2272        /* Build IOCB segments and adjust for data protection segments */
2273        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2274            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2275                QLA_SUCCESS)
2276                goto queuing_error;
2277
2278        cmd_pkt->entry_count = (uint8_t)req_cnt;
2279        cmd_pkt->timeout = cpu_to_le16(0);
2280        wmb();
2281
2282        /* Adjust ring index. */
2283        req->ring_index++;
2284        if (req->ring_index == req->length) {
2285                req->ring_index = 0;
2286                req->ring_ptr = req->ring;
2287        } else
2288                req->ring_ptr++;
2289
2290        sp->qpair->cmd_cnt++;
2291        /* Set chip new ring index. */
2292        wrt_reg_dword(req->req_q_in, req->ring_index);
2293
2294        /* Manage unprocessed RIO/ZIO commands in response queue. */
2295        if (vha->flags.process_response_queue &&
2296            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2297                qla24xx_process_response_queue(vha, rsp);
2298
2299        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2300
2301        return QLA_SUCCESS;
2302
2303queuing_error:
2304        if (status & QDSS_GOT_Q_SPACE) {
2305                req->outstanding_cmds[handle] = NULL;
2306                req->cnt += req_cnt;
2307        }
2308        /* Cleanup will be performed by the caller (queuecommand) */
2309
2310        qla_put_iocbs(sp->qpair, &sp->iores);
2311        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2312
2313        return QLA_FUNCTION_FAILED;
2314}
2315
2316/* Generic Control-SRB manipulation functions. */
2317
2318/* hardware_lock assumed to be held. */
2319
2320void *
2321__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2322{
2323        scsi_qla_host_t *vha = qpair->vha;
2324        struct qla_hw_data *ha = vha->hw;
2325        struct req_que *req = qpair->req;
2326        device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2327        uint32_t handle;
2328        request_t *pkt;
2329        uint16_t cnt, req_cnt;
2330
2331        pkt = NULL;
2332        req_cnt = 1;
2333        handle = 0;
2334
2335        if (sp && (sp->type != SRB_SCSI_CMD)) {
2336                /* Adjust entry-counts as needed. */
2337                req_cnt = sp->iocbs;
2338        }
2339
2340        /* Check for room on request queue. */
2341        if (req->cnt < req_cnt + 2) {
2342                if (qpair->use_shadow_reg)
2343                        cnt = *req->out_ptr;
2344                else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2345                    IS_QLA28XX(ha))
2346                        cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
2347                else if (IS_P3P_TYPE(ha))
2348                        cnt = rd_reg_dword(reg->isp82.req_q_out);
2349                else if (IS_FWI2_CAPABLE(ha))
2350                        cnt = rd_reg_dword(&reg->isp24.req_q_out);
2351                else if (IS_QLAFX00(ha))
2352                        cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
2353                else
2354                        cnt = qla2x00_debounce_register(
2355                            ISP_REQ_Q_OUT(ha, &reg->isp));
2356
2357                if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
2358                        qla_schedule_eeh_work(vha);
2359                        return NULL;
2360                }
2361
2362                if  (req->ring_index < cnt)
2363                        req->cnt = cnt - req->ring_index;
2364                else
2365                        req->cnt = req->length -
2366                            (req->ring_index - cnt);
2367        }
2368        if (req->cnt < req_cnt + 2)
2369                goto queuing_error;
2370
2371        if (sp) {
2372                handle = qla2xxx_get_next_handle(req);
2373                if (handle == 0) {
2374                        ql_log(ql_log_warn, vha, 0x700b,
2375                            "No room on outstanding cmd array.\n");
2376                        goto queuing_error;
2377                }
2378
2379                /* Prep command array. */
2380                req->current_outstanding_cmd = handle;
2381                req->outstanding_cmds[handle] = sp;
2382                sp->handle = handle;
2383        }
2384
2385        /* Prep packet */
2386        req->cnt -= req_cnt;
2387        pkt = req->ring_ptr;
2388        memset(pkt, 0, REQUEST_ENTRY_SIZE);
2389        if (IS_QLAFX00(ha)) {
2390                wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2391                wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
2392        } else {
2393                pkt->entry_count = req_cnt;
2394                pkt->handle = handle;
2395        }
2396
2397        return pkt;
2398
2399queuing_error:
2400        qpair->tgt_counters.num_alloc_iocb_failed++;
2401        return pkt;
2402}
2403
2404void *
2405qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2406{
2407        scsi_qla_host_t *vha = qpair->vha;
2408
2409        if (qla2x00_reset_active(vha))
2410                return NULL;
2411
2412        return __qla2x00_alloc_iocbs(qpair, sp);
2413}
2414
2415void *
2416qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2417{
2418        return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2419}
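
The two wrappers differ only in gating: qla2x00_alloc_iocbs_ready()
refuses to hand out ring space while a reset is active.  A hypothetical
caller, sketching the contract (NULL means no ring space or no free
handle; the packet returns zeroed with entry_count and handle filled):

/* Hypothetical control-SRB caller (hardware_lock already held). */
static int send_ctrl_iocb(scsi_qla_host_t *vha, srb_t *sp)
{
        request_t *pkt = qla2x00_alloc_iocbs(vha, sp);

        if (!pkt)
                return QLA_FUNCTION_FAILED;
        /* ... fill in the type-specific fields of *pkt here ... */
        return QLA_SUCCESS;
}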
2420
2421static void
2422qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2423{
2424        struct srb_iocb *lio = &sp->u.iocb_cmd;
2425
2426        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2427        logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2428        if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2429                logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2430                if (sp->vha->flags.nvme_first_burst)
2431                        logio->io_parameter[0] =
2432                                cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2433                if (sp->vha->flags.nvme2_enabled) {
2434                        /* Set service parameter BIT_7 for NVME CONF support */
2435                        logio->io_parameter[0] |=
2436                                cpu_to_le32(NVME_PRLI_SP_CONF);
2437                        /* Set service parameter BIT_8 for SLER support */
2438                        logio->io_parameter[0] |=
2439                                cpu_to_le32(NVME_PRLI_SP_SLER);
2440                        /* Set service parameter BIT_9 for PI control support */
2441                        logio->io_parameter[0] |=
2442                                cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
2443                }
2444        }
2445
2446        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2447        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2448        logio->port_id[1] = sp->fcport->d_id.b.area;
2449        logio->port_id[2] = sp->fcport->d_id.b.domain;
2450        logio->vp_index = sp->vha->vp_idx;
2451}
2452
2453static void
2454qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2455{
2456        struct srb_iocb *lio = &sp->u.iocb_cmd;
2457
2458        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2459        logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2460
2461        if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2462                logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2463        } else {
2464                logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2465                if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2466                        logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2467                if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2468                        logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2469        }
2470        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2471        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2472        logio->port_id[1] = sp->fcport->d_id.b.area;
2473        logio->port_id[2] = sp->fcport->d_id.b.domain;
2474        logio->vp_index = sp->vha->vp_idx;
2475}
2476
2477static void
2478qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2479{
2480        struct qla_hw_data *ha = sp->vha->hw;
2481        struct srb_iocb *lio = &sp->u.iocb_cmd;
2482        uint16_t opts;
2483
2484        mbx->entry_type = MBX_IOCB_TYPE;
2485        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2486        mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2487        opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2488        opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2489        if (HAS_EXTENDED_IDS(ha)) {
2490                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2491                mbx->mb10 = cpu_to_le16(opts);
2492        } else {
2493                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2494        }
2495        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2496        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2497            sp->fcport->d_id.b.al_pa);
2498        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2499}
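
On adapters without extended IDs the loop id and the option bits share
mailbox 1, as a worked example of the packing above shows (values are
illustrative):

/*
 * loop_id = 0x81, opts = BIT_1 (skip PRLI):
 *     mb1 = (0x81 << 8) | 0x02 = 0x8102
 * With extended IDs, loop_id occupies all of mb1 and opts moves to mb10.
 */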
2500
2501static void
2502qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2503{
2504        u16 control_flags = LCF_COMMAND_LOGO;
2505        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2506
2507        if (sp->fcport->explicit_logout) {
2508                control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2509        } else {
2510                control_flags |= LCF_IMPL_LOGO;
2511
2512                if (!sp->fcport->keep_nport_handle)
2513                        control_flags |= LCF_FREE_NPORT;
2514        }
2515
2516        logio->control_flags = cpu_to_le16(control_flags);
2517        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2518        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2519        logio->port_id[1] = sp->fcport->d_id.b.area;
2520        logio->port_id[2] = sp->fcport->d_id.b.domain;
2521        logio->vp_index = sp->vha->vp_idx;
2522}
2523
2524static void
2525qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2526{
2527        struct qla_hw_data *ha = sp->vha->hw;
2528
2529        mbx->entry_type = MBX_IOCB_TYPE;
2530        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2531        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2532        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2533            cpu_to_le16(sp->fcport->loop_id) :
2534            cpu_to_le16(sp->fcport->loop_id << 8);
2535        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2536        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2537            sp->fcport->d_id.b.al_pa);
2538        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2539        /* Implicit: mbx->mb10 = 0. */
2540}
2541
2542static void
2543qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2544{
2545        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2546        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2547        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2548        logio->vp_index = sp->vha->vp_idx;
2549}
2550
2551static void
2552qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2553{
2554        struct qla_hw_data *ha = sp->vha->hw;
2555
2556        mbx->entry_type = MBX_IOCB_TYPE;
2557        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2558        mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2559        if (HAS_EXTENDED_IDS(ha)) {
2560                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2561                mbx->mb10 = cpu_to_le16(BIT_0);
2562        } else {
2563                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2564        }
2565        mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2566        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2567        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2568        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2569        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2570}
2571
2572static void
2573qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2574{
2575        uint32_t flags;
2576        uint64_t lun;
2577        struct fc_port *fcport = sp->fcport;
2578        scsi_qla_host_t *vha = fcport->vha;
2579        struct qla_hw_data *ha = vha->hw;
2580        struct srb_iocb *iocb = &sp->u.iocb_cmd;
2581        struct req_que *req = vha->req;
2582
2583        flags = iocb->u.tmf.flags;
2584        lun = iocb->u.tmf.lun;
2585
2586        tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2587        tsk->entry_count = 1;
2588        tsk->handle = make_handle(req->id, tsk->handle);
2589        tsk->nport_handle = cpu_to_le16(fcport->loop_id);
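        /*
         * 2 * R_A_TOV, assuming r_a_tov is kept in 100 ms units and the
         * firmware timeout field takes seconds.
         */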
2590        tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2591        tsk->control_flags = cpu_to_le32(flags);
2592        tsk->port_id[0] = fcport->d_id.b.al_pa;
2593        tsk->port_id[1] = fcport->d_id.b.area;
2594        tsk->port_id[2] = fcport->d_id.b.domain;
2595        tsk->vp_index = fcport->vha->vp_idx;
2596
2597        if (flags == TCF_LUN_RESET) {
2598                int_to_scsilun(lun, &tsk->lun);
2599                host_to_fcp_swap((uint8_t *)&tsk->lun,
2600                        sizeof(tsk->lun));
2601        }
2602}
2603
2604void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2605{
2606        timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2607        sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2608        sp->free = qla2x00_sp_free;
2609        if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2610                init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2611        sp->start_timer = 1;
2612}
2613
2614static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2615{
2616        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2617
2618        kfree(sp->fcport);
2619
2620        if (elsio->u.els_logo.els_logo_pyld)
2621                dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2622                    elsio->u.els_logo.els_logo_pyld,
2623                    elsio->u.els_logo.els_logo_pyld_dma);
2624
2625        del_timer(&elsio->timer);
2626        qla2x00_rel_sp(sp);
2627}
2628
2629static void
2630qla2x00_els_dcmd_iocb_timeout(void *data)
2631{
2632        srb_t *sp = data;
2633        fc_port_t *fcport = sp->fcport;
2634        struct scsi_qla_host *vha = sp->vha;
2635        struct srb_iocb *lio = &sp->u.iocb_cmd;
2636        unsigned long flags = 0;
2637        int res, h;
2638
2639        ql_dbg(ql_dbg_io, vha, 0x3069,
2640            "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2641            sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2642            fcport->d_id.b.al_pa);
2643
2644        /* Abort the exchange */
2645        res = qla24xx_async_abort_cmd(sp, false);
2646        if (res) {
2647                ql_dbg(ql_dbg_io, vha, 0x3070,
2648                    "mbx abort_command failed.\n");
2649                spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2650                for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2651                        if (sp->qpair->req->outstanding_cmds[h] == sp) {
2652                                sp->qpair->req->outstanding_cmds[h] = NULL;
2653                                break;
2654                        }
2655                }
2656                spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2657                complete(&lio->u.els_logo.comp);
2658        } else {
2659                ql_dbg(ql_dbg_io, vha, 0x3071,
2660                    "mbx abort_command success.\n");
2661        }
2662}
2663
2664static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2665{
2666        fc_port_t *fcport = sp->fcport;
2667        struct srb_iocb *lio = &sp->u.iocb_cmd;
2668        struct scsi_qla_host *vha = sp->vha;
2669
2670        ql_dbg(ql_dbg_io, vha, 0x3072,
2671            "%s hdl=%x, portid=%02x%02x%02x done\n",
2672            sp->name, sp->handle, fcport->d_id.b.domain,
2673            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2674
2675        complete(&lio->u.els_logo.comp);
2676}
2677
2678int
2679qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2680    port_id_t remote_did)
2681{
2682        srb_t *sp;
2683        fc_port_t *fcport = NULL;
2684        struct srb_iocb *elsio = NULL;
2685        struct qla_hw_data *ha = vha->hw;
2686        struct els_logo_payload logo_pyld;
2687        int rval = QLA_SUCCESS;
2688
2689        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2690        if (!fcport) {
2691                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2692                return -ENOMEM;
2693        }
2694
2695        /* Alloc SRB structure */
2696        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2697        if (!sp) {
2698                kfree(fcport);
2699                ql_log(ql_log_info, vha, 0x70e6,
2700                    "SRB allocation failed\n");
2701                return -ENOMEM;
2702        }
2703
2704        elsio = &sp->u.iocb_cmd;
2705        fcport->loop_id = 0xFFFF;
2706        fcport->d_id.b.domain = remote_did.b.domain;
2707        fcport->d_id.b.area = remote_did.b.area;
2708        fcport->d_id.b.al_pa = remote_did.b.al_pa;
2709
2710        ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2711            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2712
2713        sp->type = SRB_ELS_DCMD;
2714        sp->name = "ELS_DCMD";
2715        sp->fcport = fcport;
2716        elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2717        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2718        init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2719        sp->done = qla2x00_els_dcmd_sp_done;
2720        sp->free = qla2x00_els_dcmd_sp_free;
2721
2722        elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2723                            DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2724                            GFP_KERNEL);
2725
2726        if (!elsio->u.els_logo.els_logo_pyld) {
2727                sp->free(sp);
2728                return QLA_FUNCTION_FAILED;
2729        }
2730
2731        memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2732
2733        elsio->u.els_logo.els_cmd = els_opcode;
2734        logo_pyld.opcode = els_opcode;
2735        logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2736        logo_pyld.s_id[1] = vha->d_id.b.area;
2737        logo_pyld.s_id[2] = vha->d_id.b.domain;
2738        host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2739        memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2740
2741        memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2742            sizeof(struct els_logo_payload));
2743        ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2744        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2745                       elsio->u.els_logo.els_logo_pyld,
2746                       sizeof(*elsio->u.els_logo.els_logo_pyld));
2747
2748        rval = qla2x00_start_sp(sp);
2749        if (rval != QLA_SUCCESS) {
2750                sp->free(sp);
2751                return QLA_FUNCTION_FAILED;
2752        }
2753
2754        ql_dbg(ql_dbg_io, vha, 0x3074,
2755            "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2756            sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2757            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2758
2759        wait_for_completion(&elsio->u.els_logo.comp);
2760
2761        sp->free(sp);
2762        return rval;
2763}
2764
2765static void
2766qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2767{
2768        scsi_qla_host_t *vha = sp->vha;
2769        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2770
2771        els_iocb->entry_type = ELS_IOCB_TYPE;
2772        els_iocb->entry_count = 1;
2773        els_iocb->sys_define = 0;
2774        els_iocb->entry_status = 0;
2775        els_iocb->handle = sp->handle;
2776        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2777        els_iocb->tx_dsd_count = cpu_to_le16(1);
2778        els_iocb->vp_index = vha->vp_idx;
2779        els_iocb->sof_type = EST_SOFI3;
2780        els_iocb->rx_dsd_count = 0;
2781        els_iocb->opcode = elsio->u.els_logo.els_cmd;
2782
2783        els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2784        els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2785        els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2786        /* For S_ID the byte order differs from D_ID */
2787        els_iocb->s_id[1] = vha->d_id.b.al_pa;
2788        els_iocb->s_id[2] = vha->d_id.b.area;
2789        els_iocb->s_id[0] = vha->d_id.b.domain;
2790
2791        if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2792                els_iocb->control_flags = 0;
2793                els_iocb->tx_byte_count = els_iocb->tx_len =
2794                        cpu_to_le32(sizeof(struct els_plogi_payload));
2795                put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2796                                   &els_iocb->tx_address);
2797                els_iocb->rx_dsd_count = cpu_to_le16(1);
2798                els_iocb->rx_byte_count = els_iocb->rx_len =
2799                        cpu_to_le32(sizeof(struct els_plogi_payload));
2800                put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2801                                   &els_iocb->rx_address);
2802
2803                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2804                    "PLOGI ELS IOCB:\n");
2805                ql_dump_buffer(ql_log_info, vha, 0x0109,
2806                    (uint8_t *)els_iocb,
2807                    sizeof(*els_iocb));
2808        } else {
2809                els_iocb->control_flags = cpu_to_le16(1 << 13);
2810                els_iocb->tx_byte_count =
2811                        cpu_to_le32(sizeof(struct els_logo_payload));
2812                put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2813                                   &els_iocb->tx_address);
2814                els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2815
2816                els_iocb->rx_byte_count = 0;
2817                els_iocb->rx_address = 0;
2818                els_iocb->rx_len = 0;
2819                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2820                       "LOGO ELS IOCB:");
2821                ql_dump_buffer(ql_log_info, vha, 0x010b,
2822                               els_iocb,
2823                               sizeof(*els_iocb));
2824        }
2825
2826        sp->vha->qla_stats.control_requests++;
2827}
2828
2829static void
2830qla2x00_els_dcmd2_iocb_timeout(void *data)
2831{
2832        srb_t *sp = data;
2833        fc_port_t *fcport = sp->fcport;
2834        struct scsi_qla_host *vha = sp->vha;
2835        unsigned long flags = 0;
2836        int res, h;
2837
2838        ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2839            "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2840            sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2841
2842        /* Abort the exchange */
2843        res = qla24xx_async_abort_cmd(sp, false);
2844        ql_dbg(ql_dbg_io, vha, 0x3070,
2845            "mbx abort_command %s\n",
2846            (res == QLA_SUCCESS) ? "successful" : "failed");
2847        if (res) {
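                /*
                 * Editor's note: if the mailbox abort failed, the
                 * firmware will never complete this exchange through
                 * the response queue, so reclaim the outstanding_cmds[]
                 * slot here and finish the srb as timed out ourselves.
                 */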
2848                spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2849                for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2850                        if (sp->qpair->req->outstanding_cmds[h] == sp) {
2851                                sp->qpair->req->outstanding_cmds[h] = NULL;
2852                                break;
2853                        }
2854                }
2855                spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2856                sp->done(sp, QLA_FUNCTION_TIMEOUT);
2857        }
2858}
2859
2860void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2861{
2862        if (els_plogi->els_plogi_pyld)
2863                dma_free_coherent(&vha->hw->pdev->dev,
2864                                  els_plogi->tx_size,
2865                                  els_plogi->els_plogi_pyld,
2866                                  els_plogi->els_plogi_pyld_dma);
2867
2868        if (els_plogi->els_resp_pyld)
2869                dma_free_coherent(&vha->hw->pdev->dev,
2870                                  els_plogi->rx_size,
2871                                  els_plogi->els_resp_pyld,
2872                                  els_plogi->els_resp_pyld_dma);
2873}
2874
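/*
 * Editor's sketch (not part of the driver): the buffers released above
 * are allocated in qla24xx_els_dcmd2_iocb() below with
 * dma_alloc_coherent(); the size and dma handle passed to
 * dma_free_coherent() must match the original allocation, which is why
 * els_plogi caches tx_size/rx_size.  A minimal, hypothetical
 * illustration of the pairing:
 *
 *	void *virt;
 *	dma_addr_t phys;
 *
 *	virt = dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, &phys,
 *				  GFP_KERNEL);
 *	if (virt)
 *		dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE, virt,
 *				  phys);
 */
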
2875static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2876{
2877        fc_port_t *fcport = sp->fcport;
2878        struct srb_iocb *lio = &sp->u.iocb_cmd;
2879        struct scsi_qla_host *vha = sp->vha;
2880        struct event_arg ea;
2881        struct qla_work_evt *e;
2882        struct fc_port *conflict_fcport;
2883        port_id_t cid;  /* conflict Nport id */
2884        const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2885        u16 lid;
2886
2887        ql_dbg(ql_dbg_disc, vha, 0x3072,
2888            "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2889            sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2890
2891        fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2892        del_timer(&sp->u.iocb_cmd.timer);
2893
2894        if (sp->flags & SRB_WAKEUP_ON_COMP)
2895                complete(&lio->u.els_plogi.comp);
2896        else {
2897                switch (le32_to_cpu(fw_status[0])) {
2898                case CS_DATA_UNDERRUN:
2899                case CS_COMPLETE:
2900                        memset(&ea, 0, sizeof(ea));
2901                        ea.fcport = fcport;
2902                        ea.rc = res;
2903                        qla_handle_els_plogi_done(vha, &ea);
2904                        break;
2905
2906                case CS_IOCB_ERROR:
2907                        switch (le32_to_cpu(fw_status[1])) {
2908                        case LSC_SCODE_PORTID_USED:
2909                                lid = le32_to_cpu(fw_status[2]) & 0xffff;
2910                                qlt_find_sess_invalidate_other(vha,
2911                                    wwn_to_u64(fcport->port_name),
2912                                    fcport->d_id, lid, &conflict_fcport);
2913                                if (conflict_fcport) {
2914                                        /*
2915                                         * Another fcport shares the same
2916                                         * loop_id & nport id; conflict
2917                                         * fcport needs to finish cleanup
2918                                         * before this fcport can proceed
2919                                         * to login.
2920                                         */
2921                                        conflict_fcport->conflict = fcport;
2922                                        fcport->login_pause = 1;
2923                                        ql_dbg(ql_dbg_disc, vha, 0x20ed,
2924                                            "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2925                                            __func__, __LINE__,
2926                                            fcport->port_name,
2927                                            fcport->d_id.b24, lid);
2928                                } else {
2929                                        ql_dbg(ql_dbg_disc, vha, 0x20ed,
2930                                            "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2931                                            __func__, __LINE__,
2932                                            fcport->port_name,
2933                                            fcport->d_id.b24, lid);
2934                                        qla2x00_clear_loop_id(fcport);
2935                                        set_bit(lid, vha->hw->loop_id_map);
2936                                        fcport->loop_id = lid;
2937                                        fcport->keep_nport_handle = 0;
2938                                        qlt_schedule_sess_for_deletion(fcport);
2939                                }
2940                                break;
2941
2942                        case LSC_SCODE_NPORT_USED:
2943                                cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2944                                        & 0xff;
2945                                cid.b.area   = (le32_to_cpu(fw_status[2]) >>  8)
2946                                        & 0xff;
2947                                cid.b.al_pa  = le32_to_cpu(fw_status[2]) & 0xff;
2948                                cid.b.rsvd_1 = 0;
2949
2950                                ql_dbg(ql_dbg_disc, vha, 0x20ec,
2951                                    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2952                                    __func__, __LINE__, fcport->port_name,
2953                                    fcport->loop_id, cid.b24);
2954                                set_bit(fcport->loop_id,
2955                                    vha->hw->loop_id_map);
2956                                fcport->loop_id = FC_NO_LOOP_ID;
2957                                qla24xx_post_gnl_work(vha, fcport);
2958                                break;
2959
2960                        case LSC_SCODE_NOXCB:
2961                                vha->hw->exch_starvation++;
2962                                if (vha->hw->exch_starvation > 5) {
2963                                        ql_log(ql_log_warn, vha, 0xd046,
2964                                            "Exchange starvation. Resetting RISC\n");
2965                                        vha->hw->exch_starvation = 0;
2966                                        set_bit(ISP_ABORT_NEEDED,
2967                                            &vha->dpc_flags);
2968                                        qla2xxx_wake_dpc(vha);
2969                                }
2970                                fallthrough;
2971                        default:
2972                                ql_dbg(ql_dbg_disc, vha, 0x20eb,
2973                                    "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2974                                    __func__, sp->fcport->port_name, le32_to_cpu(fw_status[0]),
2975                                    le32_to_cpu(fw_status[1]), le32_to_cpu(fw_status[2]));
2976
2977                                fcport->flags &= ~FCF_ASYNC_SENT;
2978                                qla2x00_set_fcport_disc_state(fcport,
2979                                    DSC_LOGIN_FAILED);
2980                                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2981                                break;
2982                        }
2983                        break;
2984
2985                default:
2986                        ql_dbg(ql_dbg_disc, vha, 0x20eb,
2987                            "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2988                            __func__, sp->fcport->port_name, le32_to_cpu(fw_status[0]),
2989                            le32_to_cpu(fw_status[1]), le32_to_cpu(fw_status[2]));
2990
2991                        sp->fcport->flags &= ~FCF_ASYNC_SENT;
2992                        qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
2993                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2994                        break;
2995                }
2996
2997                e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2998                if (!e) {
2999                        struct srb_iocb *elsio = &sp->u.iocb_cmd;
3000
3001                        qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3002                        sp->free(sp);
3003                        return;
3004                }
3005                e->u.iosb.sp = sp;
3006                qla2x00_post_work(vha, e);
3007        }
3008}
3009
3010int
3011qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
3012    fc_port_t *fcport, bool wait)
3013{
3014        srb_t *sp;
3015        struct srb_iocb *elsio = NULL;
3016        struct qla_hw_data *ha = vha->hw;
3017        int rval = QLA_SUCCESS;
3018        void    *ptr, *resp_ptr;
3019
3020        /* Alloc SRB structure */
3021        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3022        if (!sp) {
3023                ql_log(ql_log_info, vha, 0x70e6,
3024                    "SRB allocation failed\n");
3025                fcport->flags &= ~FCF_ASYNC_ACTIVE;
3026                return -ENOMEM;
3027        }
3028
3029        fcport->flags |= FCF_ASYNC_SENT;
3030        qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
3031        elsio = &sp->u.iocb_cmd;
3032        ql_dbg(ql_dbg_io, vha, 0x3073,
3033            "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
3034
3035        sp->type = SRB_ELS_DCMD;
3036        sp->name = "ELS_DCMD";
3037        sp->fcport = fcport;
3038
3039        elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
3040        if (wait)
3041                sp->flags = SRB_WAKEUP_ON_COMP;
3042
3043        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
3044
3045        sp->done = qla2x00_els_dcmd2_sp_done;
3046        elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
3047
3048        ptr = elsio->u.els_plogi.els_plogi_pyld =
3049            dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
3050                &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
3051
3052        if (!elsio->u.els_plogi.els_plogi_pyld) {
3053                rval = QLA_FUNCTION_FAILED;
3054                goto out;
3055        }
3056
3057        resp_ptr = elsio->u.els_plogi.els_resp_pyld =
3058            dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
3059                &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
3060
3061        if (!elsio->u.els_plogi.els_resp_pyld) {
3062                rval = QLA_FUNCTION_FAILED;
3063                goto out;
3064        }
3065
3066        ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
3067
3068        memset(ptr, 0, sizeof(struct els_plogi_payload));
3069        memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
3070        memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
3071            &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
3072
3073        elsio->u.els_plogi.els_cmd = els_opcode;
3074        elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
3075
3076        ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
3077        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
3078            (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
3079            sizeof(*elsio->u.els_plogi.els_plogi_pyld));
3080
3081        init_completion(&elsio->u.els_plogi.comp);
3082        rval = qla2x00_start_sp(sp);
3083        if (rval != QLA_SUCCESS) {
3084                rval = QLA_FUNCTION_FAILED;
3085        } else {
3086                ql_dbg(ql_dbg_disc, vha, 0x3074,
3087                    "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
3088                    sp->name, sp->handle, fcport->loop_id,
3089                    fcport->d_id.b24, vha->d_id.b24);
3090        }
3091
3092        if (wait) {
3093                wait_for_completion(&elsio->u.els_plogi.comp);
3094
3095                if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3096                        rval = QLA_FUNCTION_FAILED;
3097        } else {
3098                goto done;
3099        }
3100
3101out:
3102        fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3103        qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3104        sp->free(sp);
3105done:
3106        return rval;
3107}
3108
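/*
 * Editor's sketch (hypothetical caller, not from the driver): issuing a
 * driver-generated PLOGI and waiting for it would look roughly like:
 *
 *	if (qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, fcport, true) !=
 *	    QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0xffff, "PLOGI failed.\n");
 *
 * With wait == true the routine sleeps on els_plogi.comp and frees the
 * DMA buffers itself; with wait == false cleanup is deferred to the
 * QLA_EVT_UNMAP work queued from qla2x00_els_dcmd2_sp_done().
 */
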
3109static void
3110qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3111{
3112        struct bsg_job *bsg_job = sp->u.bsg_job;
3113        struct fc_bsg_request *bsg_request = bsg_job->request;
3114
3115        els_iocb->entry_type = ELS_IOCB_TYPE;
3116        els_iocb->entry_count = 1;
3117        els_iocb->sys_define = 0;
3118        els_iocb->entry_status = 0;
3119        els_iocb->handle = sp->handle;
3120        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3121        els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3122        els_iocb->vp_index = sp->vha->vp_idx;
3123        els_iocb->sof_type = EST_SOFI3;
3124        els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3125
3126        els_iocb->opcode =
3127            sp->type == SRB_ELS_CMD_RPT ?
3128            bsg_request->rqst_data.r_els.els_code :
3129            bsg_request->rqst_data.h_els.command_code;
3130        els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3131        els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3132        els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3133        els_iocb->control_flags = 0;
3134        els_iocb->rx_byte_count =
3135            cpu_to_le32(bsg_job->reply_payload.payload_len);
3136        els_iocb->tx_byte_count =
3137            cpu_to_le32(bsg_job->request_payload.payload_len);
3138
3139        put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3140                           &els_iocb->tx_address);
3141        els_iocb->tx_len =
3142            cpu_to_le32(sg_dma_len(bsg_job->request_payload.sg_list));
3143
3144        put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3145                           &els_iocb->rx_address);
3146        els_iocb->rx_len =
3147            cpu_to_le32(sg_dma_len(bsg_job->reply_payload.sg_list));
3148
3149        sp->vha->qla_stats.control_requests++;
3150}
3151
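/*
 * Editor's note: in the BSG ELS passthrough above, the request_payload
 * scatterlist supplies the transmit side (tx_*) and the reply_payload
 * scatterlist the receive side (rx_*); the IOCB embeds a single tx and
 * a single rx DSD taken from the first element of each list, with the
 * DSD counts taken from bsg_job's sg_cnt fields.
 */
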
3152static void
3153qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3154{
3155        uint16_t        avail_dsds;
3156        struct dsd64    *cur_dsd;
3157        struct scatterlist *sg;
3158        int index;
3159        uint16_t tot_dsds;
3160        scsi_qla_host_t *vha = sp->vha;
3161        struct qla_hw_data *ha = vha->hw;
3162        struct bsg_job *bsg_job = sp->u.bsg_job;
3163        int entry_count = 1;
3164
3165        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3166        ct_iocb->entry_type = CT_IOCB_TYPE;
3167        ct_iocb->entry_status = 0;
3168        ct_iocb->handle1 = sp->handle;
3169        SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3170        ct_iocb->status = cpu_to_le16(0);
3171        ct_iocb->control_flags = cpu_to_le16(0);
3172        ct_iocb->timeout = 0;
3173        ct_iocb->cmd_dsd_count =
3174            cpu_to_le16(bsg_job->request_payload.sg_cnt);
3175        ct_iocb->total_dsd_count =
3176            cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3177        ct_iocb->req_bytecount =
3178            cpu_to_le32(bsg_job->request_payload.payload_len);
3179        ct_iocb->rsp_bytecount =
3180            cpu_to_le32(bsg_job->reply_payload.payload_len);
3181
3182        put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3183                           &ct_iocb->req_dsd.address);
3184        ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3185
3186        put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3187                           &ct_iocb->rsp_dsd.address);
3188        ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3189
3190        avail_dsds = 1;
3191        cur_dsd = &ct_iocb->rsp_dsd;
3192        index = 0;
3193        tot_dsds = bsg_job->reply_payload.sg_cnt;
3194
3195        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3196                cont_a64_entry_t *cont_pkt;
3197
3198                /* Allocate additional continuation packets? */
3199                if (avail_dsds == 0) {
3200                        /*
3201                         * Five DSDs are available in the Cont.
3202                         * Type 1 IOCB.
3203                         */
3204                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3205                            vha->hw->req_q_map[0]);
3206                        cur_dsd = cont_pkt->dsd;
3207                        avail_dsds = 5;
3208                        entry_count++;
3209                }
3210
3211                append_dsd64(&cur_dsd, sg);
3212                avail_dsds--;
3213        }
3214        ct_iocb->entry_count = entry_count;
3215
3216        sp->vha->qla_stats.control_requests++;
3217}
3218
3219static void
3220qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3221{
3222        uint16_t        avail_dsds;
3223        struct dsd64    *cur_dsd;
3224        struct scatterlist *sg;
3225        int index;
3226        uint16_t cmd_dsds, rsp_dsds;
3227        scsi_qla_host_t *vha = sp->vha;
3228        struct qla_hw_data *ha = vha->hw;
3229        struct bsg_job *bsg_job = sp->u.bsg_job;
3230        int entry_count = 1;
3231        cont_a64_entry_t *cont_pkt = NULL;
3232
3233        ct_iocb->entry_type = CT_IOCB_TYPE;
3234        ct_iocb->entry_status = 0;
3235        ct_iocb->sys_define = 0;
3236        ct_iocb->handle = sp->handle;
3237
3238        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3239        ct_iocb->vp_index = sp->vha->vp_idx;
3240        ct_iocb->comp_status = cpu_to_le16(0);
3241
3242        cmd_dsds = bsg_job->request_payload.sg_cnt;
3243        rsp_dsds = bsg_job->reply_payload.sg_cnt;
3244
3245        ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3246        ct_iocb->timeout = 0;
3247        ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3248        ct_iocb->cmd_byte_count =
3249            cpu_to_le32(bsg_job->request_payload.payload_len);
3250
3251        avail_dsds = 2;
3252        cur_dsd = ct_iocb->dsd;
3253        index = 0;
3254
3255        for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3256                /* Allocate additional continuation packets? */
3257                if (avail_dsds == 0) {
3258                        /*
3259                         * Five DSDs are available in the Cont.
3260                         * Type 1 IOCB.
3261                         */
3262                        cont_pkt = qla2x00_prep_cont_type1_iocb(
3263                            vha, ha->req_q_map[0]);
3264                        cur_dsd = cont_pkt->dsd;
3265                        avail_dsds = 5;
3266                        entry_count++;
3267                }
3268
3269                append_dsd64(&cur_dsd, sg);
3270                avail_dsds--;
3271        }
3272
3273        index = 0;
3274
3275        for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3276                /* Allocate additional continuation packets? */
3277                if (avail_dsds == 0) {
3278                        /*
3279                         * Five DSDs are available in the Cont.
3280                         * Type 1 IOCB.
3281                         */
3282                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3283                            ha->req_q_map[0]);
3284                        cur_dsd = cont_pkt->dsd;
3285                        avail_dsds = 5;
3286                        entry_count++;
3287                }
3288
3289                append_dsd64(&cur_dsd, sg);
3290                avail_dsds--;
3291        }
3292        ct_iocb->entry_count = entry_count;
3293}
3294
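/*
 * Editor's sketch (not part of the driver): both CT builders above use
 * the same layout rule -- the older CT IOCB has room for one response
 * DSD in the base entry (avail_dsds = 1, after the fixed request DSD),
 * the 24xx CT IOCB for two (avail_dsds = 2), and every Continuation
 * Type 1 IOCB carries five more.  The resulting entry_count can be
 * computed directly (hypothetical helper):
 */
static inline uint16_t qla_ct_entry_count_sketch(uint16_t dsds,
						 uint16_t base_dsds)
{
	uint16_t entries = 1;

	/* Each block of up to 5 DSDs beyond the base IOCB's capacity
	 * needs one continuation entry.
	 */
	if (dsds > base_dsds)
		entries += DIV_ROUND_UP(dsds - base_dsds, 5);
	return entries;
}
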
3295/*
3296 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3297 * @sp: command to send to the ISP
3298 *
3299 * Returns non-zero if a failure occurred, else zero.
3300 */
3301int
3302qla82xx_start_scsi(srb_t *sp)
3303{
3304        int             nseg;
3305        unsigned long   flags;
3306        struct scsi_cmnd *cmd;
3307        uint32_t        *clr_ptr;
3308        uint32_t        handle;
3309        uint16_t        cnt;
3310        uint16_t        req_cnt;
3311        uint16_t        tot_dsds;
3312        struct device_reg_82xx __iomem *reg;
3313        uint32_t dbval;
3314        __be32 *fcp_dl;
3315        uint8_t additional_cdb_len;
3316        struct ct6_dsd *ctx;
3317        struct scsi_qla_host *vha = sp->vha;
3318        struct qla_hw_data *ha = vha->hw;
3319        struct req_que *req = NULL;
3320        struct rsp_que *rsp = NULL;
3321
3322        /* Setup device pointers. */
3323        reg = &ha->iobase->isp82;
3324        cmd = GET_CMD_SP(sp);
3325        req = vha->req;
3326        rsp = ha->rsp_q_map[0];
3327
3328        /* So we know we haven't pci_map'ed anything yet */
3329        tot_dsds = 0;
3330
3331        dbval = 0x04 | (ha->portnum << 5);
3332
3333        /* Send marker if required */
3334        if (vha->marker_needed != 0) {
3335                if (qla2x00_marker(vha, ha->base_qpair,
3336                        0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3337                        ql_log(ql_log_warn, vha, 0x300c,
3338                            "qla2x00_marker failed for cmd=%p.\n", cmd);
3339                        return QLA_FUNCTION_FAILED;
3340                }
3341                vha->marker_needed = 0;
3342        }
3343
3344        /* Acquire ring specific lock */
3345        spin_lock_irqsave(&ha->hardware_lock, flags);
3346
3347        handle = qla2xxx_get_next_handle(req);
3348        if (handle == 0)
3349                goto queuing_error;
3350
3351        /* Map the sg table so we have an accurate count of sg entries needed */
3352        if (scsi_sg_count(cmd)) {
3353                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3354                    scsi_sg_count(cmd), cmd->sc_data_direction);
3355                if (unlikely(!nseg))
3356                        goto queuing_error;
3357        } else
3358                nseg = 0;
3359
3360        tot_dsds = nseg;
3361
3362        if (tot_dsds > ql2xshiftctondsd) {
3363                struct cmd_type_6 *cmd_pkt;
3364                uint16_t more_dsd_lists = 0;
3365                struct dsd_dma *dsd_ptr;
3366                uint16_t i;
3367
3368                more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3369                if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3370                        ql_dbg(ql_dbg_io, vha, 0x300d,
3371                            "Num of DSD lists %d is more than %d for cmd=%p.\n",
3372                            more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3373                            cmd);
3374                        goto queuing_error;
3375                }
3376
3377                if (more_dsd_lists <= ha->gbl_dsd_avail)
3378                        goto sufficient_dsds;
3379                else
3380                        more_dsd_lists -= ha->gbl_dsd_avail;
3381
3382                for (i = 0; i < more_dsd_lists; i++) {
3383                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3384                        if (!dsd_ptr) {
3385                                ql_log(ql_log_fatal, vha, 0x300e,
3386                                    "Failed to allocate memory for dsd_dma "
3387                                    "for cmd=%p.\n", cmd);
3388                                goto queuing_error;
3389                        }
3390
3391                        dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3392                                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3393                        if (!dsd_ptr->dsd_addr) {
3394                                kfree(dsd_ptr);
3395                                ql_log(ql_log_fatal, vha, 0x300f,
3396                                    "Failed to allocate memory for dsd_addr "
3397                                    "for cmd=%p.\n", cmd);
3398                                goto queuing_error;
3399                        }
3400                        list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3401                        ha->gbl_dsd_avail++;
3402                }
3403
3404sufficient_dsds:
3405                req_cnt = 1;
3406
3407                if (req->cnt < (req_cnt + 2)) {
3408                        cnt = (uint16_t)rd_reg_dword_relaxed(
3409                                &reg->req_q_out[0]);
3410                        if (req->ring_index < cnt)
3411                                req->cnt = cnt - req->ring_index;
3412                        else
3413                                req->cnt = req->length -
3414                                        (req->ring_index - cnt);
3415                        if (req->cnt < (req_cnt + 2))
3416                                goto queuing_error;
3417                }
3418
3419                ctx = sp->u.scmd.ct6_ctx =
3420                    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3421                if (!ctx) {
3422                        ql_log(ql_log_fatal, vha, 0x3010,
3423                            "Failed to allocate ctx for cmd=%p.\n", cmd);
3424                        goto queuing_error;
3425                }
3426
3427                memset(ctx, 0, sizeof(struct ct6_dsd));
3428                ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3429                        GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3430                if (!ctx->fcp_cmnd) {
3431                        ql_log(ql_log_fatal, vha, 0x3011,
3432                            "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3433                        goto queuing_error;
3434                }
3435
3436                /* Initialize the DSD list and dma handle */
3437                INIT_LIST_HEAD(&ctx->dsd_list);
3438                ctx->dsd_use_cnt = 0;
3439
3440                if (cmd->cmd_len > 16) {
3441                        additional_cdb_len = cmd->cmd_len - 16;
3442                        if ((cmd->cmd_len % 4) != 0) {
3443                                /* SCSI commands bigger than 16 bytes
3444                                 * must be a multiple of 4.
3445                                 */
3446                                ql_log(ql_log_warn, vha, 0x3012,
3447                                    "scsi cmd len %d not multiple of 4 "
3448                                    "for cmd=%p.\n", cmd->cmd_len, cmd);
3449                                goto queuing_error_fcp_cmnd;
3450                        }
3451                        ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3452                } else {
3453                        additional_cdb_len = 0;
3454                        ctx->fcp_cmnd_len = 12 + 16 + 4;
3455                }
3456
3457                cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3458                cmd_pkt->handle = make_handle(req->id, handle);
3459
3460                /* Zero out remaining portion of packet. */
3461                /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3462                clr_ptr = (uint32_t *)cmd_pkt + 2;
3463                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3464                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3465
3466                /* Set NPORT-ID and LUN number*/
3467                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3468                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3469                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3470                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3471                cmd_pkt->vp_index = sp->vha->vp_idx;
3472
3473                /* Build IOCB segments */
3474                if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3475                        goto queuing_error_fcp_cmnd;
3476
3477                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3478                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3479
3480                /* build FCP_CMND IU */
3481                int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3482                ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3483
3484                if (cmd->sc_data_direction == DMA_TO_DEVICE)
3485                        ctx->fcp_cmnd->additional_cdb_len |= 1;
3486                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3487                        ctx->fcp_cmnd->additional_cdb_len |= 2;
3488
3489                /* Populate the FCP_PRIO. */
3490                if (ha->flags.fcp_prio_enabled)
3491                        ctx->fcp_cmnd->task_attribute |=
3492                            sp->fcport->fcp_prio << 3;
3493
3494                memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3495
3496                fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3497                    additional_cdb_len);
3498                *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3499
3500                cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3501                put_unaligned_le64(ctx->fcp_cmnd_dma,
3502                                   &cmd_pkt->fcp_cmnd_dseg_address);
3503
3504                sp->flags |= SRB_FCP_CMND_DMA_VALID;
3505                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3506                /* Set total data segment count. */
3507                cmd_pkt->entry_count = (uint8_t)req_cnt;
3508                /* Specify response queue number where
3509                 * completion should happen
3510                 */
3511                cmd_pkt->entry_status = (uint8_t) rsp->id;
3512        } else {
3513                struct cmd_type_7 *cmd_pkt;
3514
3515                req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3516                if (req->cnt < (req_cnt + 2)) {
3517                        cnt = (uint16_t)rd_reg_dword_relaxed(
3518                            &reg->req_q_out[0]);
3519                        if (req->ring_index < cnt)
3520                                req->cnt = cnt - req->ring_index;
3521                        else
3522                                req->cnt = req->length -
3523                                        (req->ring_index - cnt);
3524                }
3525                if (req->cnt < (req_cnt + 2))
3526                        goto queuing_error;
3527
3528                cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3529                cmd_pkt->handle = make_handle(req->id, handle);
3530
3531                /* Zero out remaining portion of packet. */
3532                /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3533                clr_ptr = (uint32_t *)cmd_pkt + 2;
3534                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3535                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3536
3537                /* Set NPORT-ID and LUN number*/
3538                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3539                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3540                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3541                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3542                cmd_pkt->vp_index = sp->vha->vp_idx;
3543
3544                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3545                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3546                    sizeof(cmd_pkt->lun));
3547
3548                /* Populate the FCP_PRIO. */
3549                if (ha->flags.fcp_prio_enabled)
3550                        cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3551
3552                /* Load SCSI command packet. */
3553                memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3554                host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3555
3556                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3557
3558                /* Build IOCB segments */
3559                qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3560
3561                /* Set total data segment count. */
3562                cmd_pkt->entry_count = (uint8_t)req_cnt;
3563                /* Specify response queue number where
3564                 * completion should happen.
3565                 */
3566                cmd_pkt->entry_status = (uint8_t) rsp->id;
3567
3568        }
3569        /* Build command packet. */
3570        req->current_outstanding_cmd = handle;
3571        req->outstanding_cmds[handle] = sp;
3572        sp->handle = handle;
3573        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3574        req->cnt -= req_cnt;
3575        wmb();
3576
3577        /* Adjust ring index. */
3578        req->ring_index++;
3579        if (req->ring_index == req->length) {
3580                req->ring_index = 0;
3581                req->ring_ptr = req->ring;
3582        } else
3583                req->ring_ptr++;
3584
3585        sp->flags |= SRB_DMA_VALID;
3586
3587        /* Set chip new ring index. */
3588        /* write, read and verify logic */
3589        dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3590        if (ql2xdbwr)
3591                qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3592        else {
3593                wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3594                wmb();
3595                while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3596                        wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3597                        wmb();
3598                }
3599        }
3600
3601        /* Manage unprocessed RIO/ZIO commands in response queue. */
3602        if (vha->flags.process_response_queue &&
3603            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3604                qla24xx_process_response_queue(vha, rsp);
3605
3606        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3607        return QLA_SUCCESS;
3608
3609queuing_error_fcp_cmnd:
3610        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3611queuing_error:
3612        if (tot_dsds)
3613                scsi_dma_unmap(cmd);
3614
3615        if (sp->u.scmd.crc_ctx) {
3616                mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3617                sp->u.scmd.crc_ctx = NULL;
3618        }
3619        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3620
3621        return QLA_FUNCTION_FAILED;
3622}
3623
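/*
 * Editor's worked example for the FCP_CMND sizing in
 * qla82xx_start_scsi() above: the IU is 12 bytes of header plus the CDB
 * area plus a 4-byte FCP_DL trailer.  A 32-byte CDB therefore gives
 * additional_cdb_len = 32 - 16 = 16 and
 * fcp_cmnd_len = 12 + 32 + 4 = 48, with fcp_dl written at
 * cdb + 16 + additional_cdb_len; a 10-byte CDB uses the fixed
 * 12 + 16 + 4 = 32-byte layout with additional_cdb_len = 0.
 */
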
3624static void
3625qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3626{
3627        struct srb_iocb *aio = &sp->u.iocb_cmd;
3628        scsi_qla_host_t *vha = sp->vha;
3629        struct req_que *req = sp->qpair->req;
3630        srb_t *orig_sp = sp->cmd_sp;
3631
3632        memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3633        abt_iocb->entry_type = ABORT_IOCB_TYPE;
3634        abt_iocb->entry_count = 1;
3635        abt_iocb->handle = make_handle(req->id, sp->handle);
3636        if (sp->fcport) {
3637                abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3638                abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3639                abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3640                abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3641        }
3642        abt_iocb->handle_to_abort =
3643                make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3644                            aio->u.abt.cmd_hndl);
3645        abt_iocb->vp_index = vha->vp_idx;
3646        abt_iocb->req_que_no = aio->u.abt.req_que_no;
3647
3648        /* need to pass original sp */
3649        if (orig_sp)
3650                qla_nvme_abort_set_option(abt_iocb, orig_sp);
3651
3652        /* Send the command to the firmware */
3653        wmb();
3654}
3655
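/*
 * Editor's note: make_handle() packs a queue id into the upper 16 bits
 * and the per-queue handle into the lower 16, so the abort entry above
 * can identify both its own exchange (handle, on this request queue)
 * and the exchange being aborted (handle_to_abort, on req_que_no).
 */
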
3656static void
3657qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3658{
3659        int i, sz;
3660
3661        mbx->entry_type = MBX_IOCB_TYPE;
3662        mbx->handle = sp->handle;
3663        sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3664
3665        for (i = 0; i < sz; i++)
3666                mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
3667}
3668
3669static void
3670qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3671{
3672        sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3673        qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3674        ct_pkt->handle = sp->handle;
3675}
3676
3677static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3678        struct nack_to_isp *nack)
3679{
3680        struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3681
3682        nack->entry_type = NOTIFY_ACK_TYPE;
3683        nack->entry_count = 1;
3684        nack->ox_id = ntfy->ox_id;
3685
3686        nack->u.isp24.handle = sp->handle;
3687        nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3688        if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3689                nack->u.isp24.flags = ntfy->u.isp24.flags &
3690                        cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3691        }
3692        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3693        nack->u.isp24.status = ntfy->u.isp24.status;
3694        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3695        nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3696        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3697        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3698        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3699        nack->u.isp24.srr_flags = 0;
3700        nack->u.isp24.srr_reject_code = 0;
3701        nack->u.isp24.srr_reject_code_expl = 0;
3702        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3703}
3704
3705/*
3706 * Build NVME LS request
3707 */
3708static void
3709qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3710{
3711        struct srb_iocb *nvme;
3712
3713        nvme = &sp->u.iocb_cmd;
3714        cmd_pkt->entry_type = PT_LS4_REQUEST;
3715        cmd_pkt->entry_count = 1;
3716        cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
3717
3718        cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3719        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3720        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3721
3722        cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3723        cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3724        cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
3725        put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3726
3727        cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3728        cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3729        cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
3730        put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3731}
3732
3733static void
3734qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3735{
3736        int map, pos;
3737
3738        vce->entry_type = VP_CTRL_IOCB_TYPE;
3739        vce->handle = sp->handle;
3740        vce->entry_count = 1;
3741        vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3742        vce->vp_count = cpu_to_le16(1);
3743
3744        /*
3745         * The index map in the firmware starts at 1, so decrement the
3746         * index here; this is safe because index 0 is never used.
3747         */
3748        map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3749        pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3750        vce->vp_idx_map[map] |= 1 << pos;
3751}
3752
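/*
 * Editor's worked example for the bitmap math above: vp_index 10 lands
 * in byte (10 - 1) / 8 = 1 at bit (10 - 1) & 7 = 1, so
 * vp_idx_map[1] |= 0x02.  Index 0 never occurs because the firmware's
 * VP index map starts at 1.
 */
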
3753static void
3754qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3755{
3756        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3757        logio->control_flags =
3758            cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3759
3760        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3761        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3762        logio->port_id[1] = sp->fcport->d_id.b.area;
3763        logio->port_id[2] = sp->fcport->d_id.b.domain;
3764        logio->vp_index = sp->fcport->vha->vp_idx;
3765}
3766
3767int
3768qla2x00_start_sp(srb_t *sp)
3769{
3770        int rval = QLA_SUCCESS;
3771        scsi_qla_host_t *vha = sp->vha;
3772        struct qla_hw_data *ha = vha->hw;
3773        struct qla_qpair *qp = sp->qpair;
3774        void *pkt;
3775        unsigned long flags;
3776
3777        if (vha->hw->flags.eeh_busy)
3778                return -EIO;
3779
3780        spin_lock_irqsave(qp->qp_lock_ptr, flags);
3781        pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3782        if (!pkt) {
3783                rval = -EAGAIN;
3784                ql_log(ql_log_warn, vha, 0x700c,
3785                    "qla2x00_alloc_iocbs failed.\n");
3786                goto done;
3787        }
3788
3789        switch (sp->type) {
3790        case SRB_LOGIN_CMD:
3791                IS_FWI2_CAPABLE(ha) ?
3792                    qla24xx_login_iocb(sp, pkt) :
3793                    qla2x00_login_iocb(sp, pkt);
3794                break;
3795        case SRB_PRLI_CMD:
3796                qla24xx_prli_iocb(sp, pkt);
3797                break;
3798        case SRB_LOGOUT_CMD:
3799                IS_FWI2_CAPABLE(ha) ?
3800                    qla24xx_logout_iocb(sp, pkt) :
3801                    qla2x00_logout_iocb(sp, pkt);
3802                break;
3803        case SRB_ELS_CMD_RPT:
3804        case SRB_ELS_CMD_HST:
3805                qla24xx_els_iocb(sp, pkt);
3806                break;
3807        case SRB_CT_CMD:
3808                IS_FWI2_CAPABLE(ha) ?
3809                    qla24xx_ct_iocb(sp, pkt) :
3810                    qla2x00_ct_iocb(sp, pkt);
3811                break;
3812        case SRB_ADISC_CMD:
3813                IS_FWI2_CAPABLE(ha) ?
3814                    qla24xx_adisc_iocb(sp, pkt) :
3815                    qla2x00_adisc_iocb(sp, pkt);
3816                break;
3817        case SRB_TM_CMD:
3818                IS_QLAFX00(ha) ?
3819                    qlafx00_tm_iocb(sp, pkt) :
3820                    qla24xx_tm_iocb(sp, pkt);
3821                break;
3822        case SRB_FXIOCB_DCMD:
3823        case SRB_FXIOCB_BCMD:
3824                qlafx00_fxdisc_iocb(sp, pkt);
3825                break;
3826        case SRB_NVME_LS:
3827                qla_nvme_ls(sp, pkt);
3828                break;
3829        case SRB_ABT_CMD:
3830                IS_QLAFX00(ha) ?
3831                        qlafx00_abort_iocb(sp, pkt) :
3832                        qla24xx_abort_iocb(sp, pkt);
3833                break;
3834        case SRB_ELS_DCMD:
3835                qla24xx_els_logo_iocb(sp, pkt);
3836                break;
3837        case SRB_CT_PTHRU_CMD:
3838                qla2x00_ctpthru_cmd_iocb(sp, pkt);
3839                break;
3840        case SRB_MB_IOCB:
3841                qla2x00_mb_iocb(sp, pkt);
3842                break;
3843        case SRB_NACK_PLOGI:
3844        case SRB_NACK_PRLI:
3845        case SRB_NACK_LOGO:
3846                qla2x00_send_notify_ack_iocb(sp, pkt);
3847                break;
3848        case SRB_CTRL_VP:
3849                qla25xx_ctrlvp_iocb(sp, pkt);
3850                break;
3851        case SRB_PRLO_CMD:
3852                qla24xx_prlo_iocb(sp, pkt);
3853                break;
3854        default:
3855                break;
3856        }
3857
3858        if (sp->start_timer)
3859                add_timer(&sp->u.iocb_cmd.timer);
3860
3861        wmb();
3862        qla2x00_start_iocbs(vha, qp->req);
3863done:
3864        spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3865        return rval;
3866}
3867
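/*
 * Editor's sketch (hypothetical async path, not from the driver): all
 * of the builders above are reached through this one dispatcher, so a
 * caller only fills in the srb and starts it:
 *
 *	sp->type = SRB_PRLO_CMD;
 *	sp->name = "prlo";
 *	sp->done = my_sp_done;          (hypothetical completion callback)
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		goto done_free_sp;      (hypothetical cleanup label)
 */
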
3868static void
3869qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3870                                struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3871{
3872        uint16_t avail_dsds;
3873        struct dsd64 *cur_dsd;
3874        uint32_t req_data_len = 0;
3875        uint32_t rsp_data_len = 0;
3876        struct scatterlist *sg;
3877        int index;
3878        int entry_count = 1;
3879        struct bsg_job *bsg_job = sp->u.bsg_job;
3880
3881        /* Update entry type to indicate bidirectional command. */
3882        put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3883
3884        /* Set the transfer direction; for a bidirectional command both
3885         * flags are set.  Also set the BD_WRAP_BACK flag; the firmware
3886         * takes care of assigning DID=SID for outgoing packets.
3887         */
3888        cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3889        cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3890        cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3891                                                        BD_WRAP_BACK);
3892
3893        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3894        cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3895        cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3896        cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3897
3898        vha->bidi_stats.transfer_bytes += req_data_len;
3899        vha->bidi_stats.io_count++;
3900
3901        vha->qla_stats.output_bytes += req_data_len;
3902        vha->qla_stats.output_requests++;
3903
3904        /* Only one DSD is available in the bidirectional IOCB; the
3905         * remaining DSDs are bundled in continuation IOCBs.
3906         */
3907        avail_dsds = 1;
3908        cur_dsd = &cmd_pkt->fcp_dsd;
3909
3910        index = 0;
3911
3912        for_each_sg(bsg_job->request_payload.sg_list, sg,
3913                                bsg_job->request_payload.sg_cnt, index) {
3914                cont_a64_entry_t *cont_pkt;
3915
3916                /* Allocate additional continuation packets */
3917                if (avail_dsds == 0) {
3918                        /* Continuation Type 1 IOCB can accommodate
3919                         * 5 DSDs.
3920                         */
3921                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3922                        cur_dsd = cont_pkt->dsd;
3923                        avail_dsds = 5;
3924                        entry_count++;
3925                }
3926                append_dsd64(&cur_dsd, sg);
3927                avail_dsds--;
3928        }
3929        /* The read-request DSDs always follow the write DSDs, so they
3930         * land in continuation IOCBs: if there is room on the current
3931         * IOCB they are added there, otherwise a new continuation IOCB
3932         * is allocated.
3933         */
3934        for_each_sg(bsg_job->reply_payload.sg_list, sg,
3935                                bsg_job->reply_payload.sg_cnt, index) {
3936                cont_a64_entry_t *cont_pkt;
3937
3938                /* Allocate additional continuation packets */
3939                if (avail_dsds == 0) {
3940                        /* Continuation Type 1 IOCB can accommodate
3941                         * 5 DSDs.
3942                         */
3943                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3944                        cur_dsd = cont_pkt->dsd;
3945                        avail_dsds = 5;
3946                        entry_count++;
3947                }
3948                append_dsd64(&cur_dsd, sg);
3949                avail_dsds--;
3950        }
3951        /* This value must equal the number of IOCBs used for this command. */
3952        cmd_pkt->entry_count = entry_count;
3953}
3954
3955int
3956qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3957{
3958
3959        struct qla_hw_data *ha = vha->hw;
3960        unsigned long flags;
3961        uint32_t handle;
3962        uint16_t req_cnt;
3963        uint16_t cnt;
3964        uint32_t *clr_ptr;
3965        struct cmd_bidir *cmd_pkt = NULL;
3966        struct rsp_que *rsp;
3967        struct req_que *req;
3968        int rval = EXT_STATUS_OK;
3969
3970        rval = QLA_SUCCESS;
3971
3972        rsp = ha->rsp_q_map[0];
3973        req = vha->req;
3974
3975        /* Send marker if required */
3976        if (vha->marker_needed != 0) {
3977                if (qla2x00_marker(vha, ha->base_qpair,
3978                        0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3979                        return EXT_STATUS_MAILBOX;
3980                vha->marker_needed = 0;
3981        }
3982
3983        /* Acquire ring specific lock */
3984        spin_lock_irqsave(&ha->hardware_lock, flags);
3985
3986        handle = qla2xxx_get_next_handle(req);
3987        if (handle == 0) {
3988                rval = EXT_STATUS_BUSY;
3989                goto queuing_error;
3990        }
3991
3992        /* Calculate number of IOCB required */
3993        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3994
3995        /* Check for room on request queue. */
3996        if (req->cnt < req_cnt + 2) {
3997                if (IS_SHADOW_REG_CAPABLE(ha)) {
3998                        cnt = *req->out_ptr;
3999                } else {
4000                        cnt = rd_reg_dword_relaxed(req->req_q_out);
4001                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
4002                                goto queuing_error;
4003                }
4004
4005                if  (req->ring_index < cnt)
4006                        req->cnt = cnt - req->ring_index;
4007                else
4008                        req->cnt = req->length -
4009                                (req->ring_index - cnt);
4010        }
4011        if (req->cnt < req_cnt + 2) {
4012                rval = EXT_STATUS_BUSY;
4013                goto queuing_error;
4014        }
4015
4016        cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
4017        cmd_pkt->handle = make_handle(req->id, handle);
4018
4019        /* Zero out remaining portion of packet. */
4020        /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
4021        clr_ptr = (uint32_t *)cmd_pkt + 2;
4022        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
4023
4024        /* Set NPORT-ID  (of vha)*/
4025        cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
4026        cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
4027        cmd_pkt->port_id[1] = vha->d_id.b.area;
4028        cmd_pkt->port_id[2] = vha->d_id.b.domain;
4029
4030        qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
4031        cmd_pkt->entry_status = (uint8_t) rsp->id;
4032        /* Build command packet. */
4033        req->current_outstanding_cmd = handle;
4034        req->outstanding_cmds[handle] = sp;
4035        sp->handle = handle;
4036        req->cnt -= req_cnt;
4037
4038        /* Send the command to the firmware */
4039        wmb();
4040        qla2x00_start_iocbs(vha, req);
4041queuing_error:
4042        spin_unloc