linux/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * QLogic qlcnic NIC Driver
   4 * Copyright (c) 2009-2013 QLogic Corporation
   5 */
   6
   7#include <linux/types.h>
   8
   9#include "qlcnic_sriov.h"
  10#include "qlcnic.h"
  11#include "qlcnic_83xx_hw.h"
  12
  13#define QLC_BC_COMMAND  0
  14#define QLC_BC_RESPONSE 1
  15
  16#define QLC_MBOX_RESP_TIMEOUT           (10 * HZ)
  17#define QLC_MBOX_CH_FREE_TIMEOUT        (10 * HZ)
  18
  19#define QLC_BC_MSG              0
  20#define QLC_BC_CFREE            1
  21#define QLC_BC_FLR              2
  22#define QLC_BC_HDR_SZ           16
  23#define QLC_BC_PAYLOAD_SZ       (1024 - QLC_BC_HDR_SZ)
  24
  25#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF            2048
  26#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF      512
  27
  28#define QLC_83XX_VF_RESET_FAIL_THRESH   8
  29#define QLC_BC_CMD_MAX_RETRY_CNT        5
  30
  31static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
  32static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
  33static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
  34static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
  35static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
  36static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
  37static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
  38                                  struct qlcnic_cmd_args *);
  39static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
  40static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
  41static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
  42static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
  43static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
  44                                        struct qlcnic_cmd_args *);
  45
  46static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
  47        .read_crb                       = qlcnic_83xx_read_crb,
  48        .write_crb                      = qlcnic_83xx_write_crb,
  49        .read_reg                       = qlcnic_83xx_rd_reg_indirect,
  50        .write_reg                      = qlcnic_83xx_wrt_reg_indirect,
  51        .get_mac_address                = qlcnic_83xx_get_mac_address,
  52        .setup_intr                     = qlcnic_83xx_setup_intr,
  53        .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
  54        .mbx_cmd                        = qlcnic_sriov_issue_cmd,
  55        .get_func_no                    = qlcnic_83xx_get_func_no,
  56        .api_lock                       = qlcnic_83xx_cam_lock,
  57        .api_unlock                     = qlcnic_83xx_cam_unlock,
  58        .process_lb_rcv_ring_diag       = qlcnic_83xx_process_rcv_ring_diag,
  59        .create_rx_ctx                  = qlcnic_83xx_create_rx_ctx,
  60        .create_tx_ctx                  = qlcnic_83xx_create_tx_ctx,
  61        .del_rx_ctx                     = qlcnic_83xx_del_rx_ctx,
  62        .del_tx_ctx                     = qlcnic_83xx_del_tx_ctx,
  63        .setup_link_event               = qlcnic_83xx_setup_link_event,
  64        .get_nic_info                   = qlcnic_83xx_get_nic_info,
  65        .get_pci_info                   = qlcnic_83xx_get_pci_info,
  66        .set_nic_info                   = qlcnic_83xx_set_nic_info,
  67        .change_macvlan                 = qlcnic_83xx_sre_macaddr_change,
  68        .napi_enable                    = qlcnic_83xx_napi_enable,
  69        .napi_disable                   = qlcnic_83xx_napi_disable,
  70        .config_intr_coal               = qlcnic_83xx_config_intr_coal,
  71        .config_rss                     = qlcnic_83xx_config_rss,
  72        .config_hw_lro                  = qlcnic_83xx_config_hw_lro,
  73        .config_promisc_mode            = qlcnic_83xx_nic_set_promisc,
  74        .change_l2_filter               = qlcnic_83xx_change_l2_filter,
  75        .get_board_info                 = qlcnic_83xx_get_port_info,
  76        .free_mac_list                  = qlcnic_sriov_vf_free_mac_list,
  77        .enable_sds_intr                = qlcnic_83xx_enable_sds_intr,
  78        .disable_sds_intr               = qlcnic_83xx_disable_sds_intr,
  79        .encap_rx_offload               = qlcnic_83xx_encap_rx_offload,
  80        .encap_tx_offload               = qlcnic_83xx_encap_tx_offload,
  81};
  82
  83static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
  84        .config_bridged_mode    = qlcnic_config_bridged_mode,
  85        .config_led             = qlcnic_config_led,
  86        .cancel_idc_work        = qlcnic_sriov_vf_cancel_fw_work,
  87        .napi_add               = qlcnic_83xx_napi_add,
  88        .napi_del               = qlcnic_83xx_napi_del,
  89        .shutdown               = qlcnic_sriov_vf_shutdown,
  90        .resume                 = qlcnic_sriov_vf_resume,
  91        .config_ipaddr          = qlcnic_83xx_config_ipaddr,
  92        .clear_legacy_intr      = qlcnic_83xx_clear_legacy_intr,
  93};
  94
  95static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
  96        {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
  97        {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
  98        {QLCNIC_BC_CMD_GET_ACL, 3, 14},
  99        {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
 100};
 101
 102static inline bool qlcnic_sriov_bc_msg_check(u32 val)
 103{
 104        return (val & (1 << QLC_BC_MSG)) ? true : false;
 105}
 106
 107static inline bool qlcnic_sriov_channel_free_check(u32 val)
 108{
 109        return (val & (1 << QLC_BC_CFREE)) ? true : false;
 110}
 111
 112static inline bool qlcnic_sriov_flr_check(u32 val)
 113{
 114        return (val & (1 << QLC_BC_FLR)) ? true : false;
 115}
 116
 117static inline u8 qlcnic_sriov_target_func_id(u32 val)
 118{
 119        return (val >> 4) & 0xff;
 120}
 121
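/* Derive the PCI devfn of VF @vf_id from the PF's SR-IOV capability
 * (First VF Offset + VF Stride). Returns 0 on a VF function or when
 * no SR-IOV capability is found.
 */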
 122static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
 123{
 124        struct pci_dev *dev = adapter->pdev;
 125        int pos;
 126        u16 stride, offset;
 127
 128        if (qlcnic_sriov_vf_check(adapter))
 129                return 0;
 130
 131        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
 132        if (!pos)
 133                return 0;
 134        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
 135        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
 136
 137        return (dev->devfn + offset + stride * vf_id) & 0xff;
 138}
 139
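/* Allocate the SR-IOV context: the per-VF info array, the "bc-trans" and
 * "async" back-channel workqueues, and the per-VF locks, lists and work
 * items. On the PF, a vport with a random MAC is also allocated per VF.
 */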
 140int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 141{
 142        struct qlcnic_sriov *sriov;
 143        struct qlcnic_back_channel *bc;
 144        struct workqueue_struct *wq;
 145        struct qlcnic_vport *vp;
 146        struct qlcnic_vf_info *vf;
 147        int err, i;
 148
 149        if (!qlcnic_sriov_enable_check(adapter))
 150                return -EIO;
 151
 152        sriov  = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
 153        if (!sriov)
 154                return -ENOMEM;
 155
 156        adapter->ahw->sriov = sriov;
 157        sriov->num_vfs = num_vfs;
 158        bc = &sriov->bc;
 159        sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info),
 160                                 GFP_KERNEL);
 161        if (!sriov->vf_info) {
 162                err = -ENOMEM;
 163                goto qlcnic_free_sriov;
 164        }
 165
 166        wq = create_singlethread_workqueue("bc-trans");
 167        if (wq == NULL) {
 168                err = -ENOMEM;
 169                dev_err(&adapter->pdev->dev,
 170                        "Cannot create bc-trans workqueue\n");
 171                goto qlcnic_free_vf_info;
 172        }
 173
 174        bc->bc_trans_wq = wq;
 175
 176        wq = create_singlethread_workqueue("async");
 177        if (wq == NULL) {
 178                err = -ENOMEM;
 179                dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
 180                goto qlcnic_destroy_trans_wq;
 181        }
 182
  183        bc->bc_async_wq = wq;
 184        INIT_LIST_HEAD(&bc->async_cmd_list);
 185        INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
 186        spin_lock_init(&bc->queue_lock);
 187        bc->adapter = adapter;
 188
 189        for (i = 0; i < num_vfs; i++) {
 190                vf = &sriov->vf_info[i];
 191                vf->adapter = adapter;
 192                vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
 193                mutex_init(&vf->send_cmd_lock);
 194                spin_lock_init(&vf->vlan_list_lock);
 195                INIT_LIST_HEAD(&vf->rcv_act.wait_list);
 196                INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
 197                spin_lock_init(&vf->rcv_act.lock);
 198                spin_lock_init(&vf->rcv_pend.lock);
 199                init_completion(&vf->ch_free_cmpl);
 200
 201                INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
 202
 203                if (qlcnic_sriov_pf_check(adapter)) {
 204                        vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
 205                        if (!vp) {
 206                                err = -ENOMEM;
 207                                goto qlcnic_destroy_async_wq;
 208                        }
 209                        sriov->vf_info[i].vp = vp;
 210                        vp->vlan_mode = QLC_GUEST_VLAN_MODE;
 211                        vp->max_tx_bw = MAX_BW;
 212                        vp->min_tx_bw = MIN_BW;
 213                        vp->spoofchk = false;
 214                        eth_random_addr(vp->mac);
 215                        dev_info(&adapter->pdev->dev,
 216                                 "MAC Address %pM is configured for VF %d\n",
 217                                 vp->mac, i);
 218                }
 219        }
 220
 221        return 0;
 222
  223qlcnic_destroy_async_wq:
        /* undo the vport allocations made for earlier VFs */
        while (i--)
                kfree(sriov->vf_info[i].vp);
  224        destroy_workqueue(bc->bc_async_wq);
 225
 226qlcnic_destroy_trans_wq:
 227        destroy_workqueue(bc->bc_trans_wq);
 228
 229qlcnic_free_vf_info:
 230        kfree(sriov->vf_info);
 231
 232qlcnic_free_sriov:
 233        kfree(adapter->ahw->sriov);
 234        return err;
 235}
 236
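/* Drain a transaction wait list, freeing the mailbox arguments and the
 * transaction itself for every queued entry.
 */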
 237void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
 238{
 239        struct qlcnic_bc_trans *trans;
 240        struct qlcnic_cmd_args cmd;
 241        unsigned long flags;
 242
 243        spin_lock_irqsave(&t_list->lock, flags);
 244
 245        while (!list_empty(&t_list->wait_list)) {
 246                trans = list_first_entry(&t_list->wait_list,
 247                                         struct qlcnic_bc_trans, list);
 248                list_del(&trans->list);
 249                t_list->count--;
 250                cmd.req.arg = (u32 *)trans->req_pay;
 251                cmd.rsp.arg = (u32 *)trans->rsp_pay;
 252                qlcnic_free_mbx_args(&cmd);
 253                qlcnic_sriov_cleanup_transaction(trans);
 254        }
 255
 256        spin_unlock_irqrestore(&t_list->lock, flags);
 257}
 258
 259void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 260{
 261        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 262        struct qlcnic_back_channel *bc = &sriov->bc;
 263        struct qlcnic_vf_info *vf;
 264        int i;
 265
 266        if (!qlcnic_sriov_enable_check(adapter))
 267                return;
 268
 269        qlcnic_sriov_cleanup_async_list(bc);
 270        destroy_workqueue(bc->bc_async_wq);
 271
 272        for (i = 0; i < sriov->num_vfs; i++) {
 273                vf = &sriov->vf_info[i];
 274                qlcnic_sriov_cleanup_list(&vf->rcv_pend);
 275                cancel_work_sync(&vf->trans_work);
 276                qlcnic_sriov_cleanup_list(&vf->rcv_act);
 277        }
 278
 279        destroy_workqueue(bc->bc_trans_wq);
 280
 281        for (i = 0; i < sriov->num_vfs; i++)
 282                kfree(sriov->vf_info[i].vp);
 283
 284        kfree(sriov->vf_info);
 285        kfree(adapter->ahw->sriov);
 286}
 287
 288static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
 289{
 290        qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
 291        qlcnic_sriov_cfg_bc_intr(adapter, 0);
 292        __qlcnic_sriov_cleanup(adapter);
 293}
 294
 295void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 296{
 297        if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
 298                return;
 299
 300        qlcnic_sriov_free_vlans(adapter);
 301
 302        if (qlcnic_sriov_pf_check(adapter))
 303                qlcnic_sriov_pf_cleanup(adapter);
 304
 305        if (qlcnic_sriov_vf_check(adapter))
 306                qlcnic_sriov_vf_cleanup(adapter);
 307}
 308
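/* Post a single back-channel fragment (header plus payload) through the
 * 83xx mailbox and wait, with a timeout, for its completion. Returns the
 * mailbox response opcode.
 */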
 309static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
 310                                    u32 *pay, u8 pci_func, u8 size)
 311{
 312        struct qlcnic_hardware_context *ahw = adapter->ahw;
 313        struct qlcnic_mailbox *mbx = ahw->mailbox;
 314        struct qlcnic_cmd_args cmd;
 315        unsigned long timeout;
 316        int err;
 317
 318        memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
 319        cmd.hdr = hdr;
 320        cmd.pay = pay;
 321        cmd.pay_size = size;
 322        cmd.func_num = pci_func;
 323        cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
 324        cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
 325
 326        err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
 327        if (err) {
 328                dev_err(&adapter->pdev->dev,
 329                        "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 330                        __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
 331                        ahw->op_mode);
 332                return err;
 333        }
 334
 335        if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
 336                dev_err(&adapter->pdev->dev,
 337                        "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 338                        __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
 339                        ahw->op_mode);
 340                flush_workqueue(mbx->work_q);
 341        }
 342
 343        return cmd.rsp_opcode;
 344}
 345
 346static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
 347{
 348        adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
 349        adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
 350        adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
 351        adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
 352        adapter->num_txd = MAX_CMD_DESCRIPTORS;
 353        adapter->max_rds_rings = MAX_RDS_RINGS;
 354}
 355
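/* Fetch the VF's vport resource limits via QLCNIC_CMD_GET_NIC_INFO; the
 * first group of fields is copied only when its validity bit in the
 * response is set.
 */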
 356int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
 357                                   struct qlcnic_info *npar_info, u16 vport_id)
 358{
 359        struct device *dev = &adapter->pdev->dev;
 360        struct qlcnic_cmd_args cmd;
 361        int err;
 362        u32 status;
 363
 364        err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
 365        if (err)
 366                return err;
 367
 368        cmd.req.arg[1] = vport_id << 16 | 0x1;
 369        err = qlcnic_issue_cmd(adapter, &cmd);
 370        if (err) {
 371                dev_err(&adapter->pdev->dev,
 372                        "Failed to get vport info, err=%d\n", err);
 373                qlcnic_free_mbx_args(&cmd);
 374                return err;
 375        }
 376
 377        status = cmd.rsp.arg[2] & 0xffff;
 378        if (status & BIT_0)
 379                npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
 380        if (status & BIT_1)
 381                npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
 382        if (status & BIT_2)
 383                npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
 384        if (status & BIT_3)
 385                npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
 386        if (status & BIT_4)
 387                npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
 388        if (status & BIT_5)
 389                npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
 390        if (status & BIT_6)
 391                npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
 392        if (status & BIT_7)
 393                npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
 394        if (status & BIT_8)
 395                npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
 396        if (status & BIT_9)
 397                npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
 398
 399        npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
 400        npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
 401        npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
 402        npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
 403
 404        dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
 405                 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
 406                 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
 407                 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
 408                 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
 409                 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
 410                 npar_info->min_tx_bw, npar_info->max_tx_bw,
 411                 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
 412                 npar_info->max_rx_mcast_mac_filters,
 413                 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
 414                 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
 415                 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
 416                 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
 417                 npar_info->max_remote_ipv6_addrs);
 418
 419        qlcnic_free_mbx_args(&cmd);
 420        return err;
 421}
 422
 423static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
 424                                      struct qlcnic_cmd_args *cmd)
 425{
 426        adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
 427        adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
 428        return 0;
 429}
 430
 431static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
 432                                            struct qlcnic_cmd_args *cmd)
 433{
 434        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 435        int i, num_vlans;
 436        u16 *vlans;
 437
 438        if (sriov->allowed_vlans)
 439                return 0;
 440
 441        sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
 442        sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
 443        dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
 444                 sriov->num_allowed_vlans);
 445
 446        qlcnic_sriov_alloc_vlans(adapter);
 447
 448        if (!sriov->any_vlan)
 449                return 0;
 450
 451        num_vlans = sriov->num_allowed_vlans;
 452        sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
 453        if (!sriov->allowed_vlans)
 454                return -ENOMEM;
 455
 456        vlans = (u16 *)&cmd->rsp.arg[3];
 457        for (i = 0; i < num_vlans; i++)
 458                sriov->allowed_vlans[i] = vlans[i];
 459
 460        return 0;
 461}
 462
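/* Query the PF for this VF's ACL over the back channel and apply the
 * returned VLAN mode (guest VLAN or PVID).
 */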
 463static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
 464{
 465        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 466        struct qlcnic_cmd_args cmd;
 467        int ret = 0;
 468
 469        memset(&cmd, 0, sizeof(cmd));
 470        ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
 471        if (ret)
 472                return ret;
 473
 474        ret = qlcnic_issue_cmd(adapter, &cmd);
 475        if (ret) {
 476                dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
 477                        ret);
 478        } else {
 479                sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
 480                switch (sriov->vlan_mode) {
 481                case QLC_GUEST_VLAN_MODE:
 482                        ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
 483                        break;
 484                case QLC_PVID_MODE:
 485                        ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
 486                        break;
 487                }
 488        }
 489
 490        qlcnic_free_mbx_args(&cmd);
 491        return ret;
 492}
 493
 494static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
 495{
 496        struct qlcnic_hardware_context *ahw = adapter->ahw;
 497        struct qlcnic_info nic_info;
 498        int err;
 499
 500        err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
 501        if (err)
 502                return err;
 503
 504        ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
 505
 506        err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
 507        if (err)
 508                return -EIO;
 509
 510        if (qlcnic_83xx_get_port_info(adapter))
 511                return -EIO;
 512
 513        qlcnic_sriov_vf_cfg_buff_desc(adapter);
 514        adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
 515        dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
 516                 adapter->ahw->fw_hal_version);
 517
 518        ahw->physical_port = (u8) nic_info.phys_port;
 519        ahw->switch_mode = nic_info.switch_mode;
 520        ahw->max_mtu = nic_info.max_mtu;
 521        ahw->op_mode = nic_info.op_mode;
 522        ahw->capabilities = nic_info.capabilities;
 523        return 0;
 524}
 525
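/* Bring up a VF: set up interrupts and the mailbox, initialize the back
 * channel, perform the CHANNEL_INIT handshake with the PF, query vport
 * limits and the ACL, register the netdev and schedule the device-state
 * poll work. Error paths unwind in reverse order.
 */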
 526static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 527                                 int pci_using_dac)
 528{
 529        int err;
 530
 531        adapter->flags |= QLCNIC_VLAN_FILTERING;
 532        adapter->ahw->total_nic_func = 1;
 533        INIT_LIST_HEAD(&adapter->vf_mc_list);
 534        if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
 535                dev_warn(&adapter->pdev->dev,
 536                         "Device does not support MSI interrupts\n");
 537
 538        /* compute and set default and max tx/sds rings */
 539        qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
 540        qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
 541
 542        err = qlcnic_setup_intr(adapter);
 543        if (err) {
 544                dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
 545                goto err_out_disable_msi;
 546        }
 547
 548        err = qlcnic_83xx_setup_mbx_intr(adapter);
 549        if (err)
 550                goto err_out_disable_msi;
 551
 552        err = qlcnic_sriov_init(adapter, 1);
 553        if (err)
 554                goto err_out_disable_mbx_intr;
 555
 556        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
 557        if (err)
 558                goto err_out_cleanup_sriov;
 559
 560        err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
 561        if (err)
 562                goto err_out_disable_bc_intr;
 563
 564        err = qlcnic_sriov_vf_init_driver(adapter);
 565        if (err)
 566                goto err_out_send_channel_term;
 567
 568        err = qlcnic_sriov_get_vf_acl(adapter);
 569        if (err)
 570                goto err_out_send_channel_term;
 571
 572        err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
 573        if (err)
 574                goto err_out_send_channel_term;
 575
 576        pci_set_drvdata(adapter->pdev, adapter);
 577        dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
 578                 adapter->netdev->name);
 579
 580        qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
 581                             adapter->ahw->idc.delay);
 582        return 0;
 583
 584err_out_send_channel_term:
 585        qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
 586
 587err_out_disable_bc_intr:
 588        qlcnic_sriov_cfg_bc_intr(adapter, 0);
 589
 590err_out_cleanup_sriov:
 591        __qlcnic_sriov_cleanup(adapter);
 592
 593err_out_disable_mbx_intr:
 594        qlcnic_83xx_free_mbx_intr(adapter);
 595
 596err_out_disable_msi:
 597        qlcnic_teardown_intr(adapter);
 598        return err;
 599}
 600
 601static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
 602{
 603        u32 state;
 604
 605        do {
 606                msleep(20);
 607                if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
 608                        return -EIO;
 609                state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
 610        } while (state != QLC_83XX_IDC_DEV_READY);
 611
 612        return 0;
 613}
 614
 615int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 616{
 617        struct qlcnic_hardware_context *ahw = adapter->ahw;
 618        int err;
 619
 620        set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
 621        ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
 622        ahw->reset_context = 0;
 623        adapter->fw_fail_cnt = 0;
 624        ahw->msix_supported = 1;
 625        adapter->need_fw_reset = 0;
 626        adapter->flags |= QLCNIC_TX_INTR_SHARED;
 627
 628        err = qlcnic_sriov_check_dev_ready(adapter);
 629        if (err)
 630                return err;
 631
 632        err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
 633        if (err)
 634                return err;
 635
 636        if (qlcnic_read_mac_addr(adapter))
 637                dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
 638
 639        INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
 640
 641        clear_bit(__QLCNIC_RESETTING, &adapter->state);
 642        return 0;
 643}
 644
 645void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
 646{
 647        struct qlcnic_hardware_context *ahw = adapter->ahw;
 648
 649        ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
 650        dev_info(&adapter->pdev->dev,
 651                 "HAL Version: %d Non Privileged SRIOV function\n",
 652                 ahw->fw_hal_version);
 653        adapter->nic_ops = &qlcnic_sriov_vf_ops;
 654        set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
 655        return;
 656}
 657
 658void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
 659{
 660        ahw->hw_ops             = &qlcnic_sriov_vf_hw_ops;
 661        ahw->reg_tbl            = (u32 *)qlcnic_83xx_reg_tbl;
 662        ahw->ext_reg_tbl        = (u32 *)qlcnic_83xx_ext_reg_tbl;
 663}
 664
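/* Return the payload size of fragment @curr_frag: QLC_BC_PAYLOAD_SZ for
 * every fragment except the last, which carries the remainder.
 */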
 665static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
 666{
 667        u32 pay_size;
 668
 669        pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
 670
 671        if (pay_size)
 672                pay_size = QLC_BC_PAYLOAD_SZ;
 673        else
 674                pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
 675
 676        return pay_size;
 677}
 678
 679int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
 680{
 681        struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
 682        u8 i;
 683
 684        if (qlcnic_sriov_vf_check(adapter))
 685                return 0;
 686
 687        for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
 688                if (vf_info[i].pci_func == pci_func)
 689                        return i;
 690        }
 691
 692        return -EINVAL;
 693}
 694
 695static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
 696{
 697        *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
 698        if (!*trans)
 699                return -ENOMEM;
 700
 701        init_completion(&(*trans)->resp_cmpl);
 702        return 0;
 703}
 704
 705static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
 706                                            u32 size)
 707{
 708        *hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC);
 709        if (!*hdr)
 710                return -ENOMEM;
 711
 712        return 0;
 713}
 714
 715static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
 716{
 717        const struct qlcnic_mailbox_metadata *mbx_tbl;
 718        int i, size;
 719
 720        mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
 721        size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
 722
 723        for (i = 0; i < size; i++) {
 724                if (type == mbx_tbl[i].cmd) {
 725                        mbx->op_type = QLC_BC_CMD;
 726                        mbx->req.num = mbx_tbl[i].in_args;
 727                        mbx->rsp.num = mbx_tbl[i].out_args;
 728                        mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
 729                                               GFP_ATOMIC);
 730                        if (!mbx->req.arg)
 731                                return -ENOMEM;
 732                        mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
 733                                               GFP_ATOMIC);
 734                        if (!mbx->rsp.arg) {
 735                                kfree(mbx->req.arg);
 736                                mbx->req.arg = NULL;
 737                                return -ENOMEM;
 738                        }
 739                        mbx->req.arg[0] = (type | (mbx->req.num << 16) |
 740                                           (3 << 29));
 741                        mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
 742                        return 0;
 743                }
 744        }
 745        return -EINVAL;
 746}
 747
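/* Build the fragment headers for a back-channel transaction. For a
 * command, the mailbox args become the request/response payloads and
 * header arrays are allocated for both directions; for a response, the
 * command args are mapped onto the payload already received. Each header
 * carries the sequence id, opcode and fragment count.
 */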
 748static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
 749                                       struct qlcnic_cmd_args *cmd,
 750                                       u16 seq, u8 msg_type)
 751{
 752        struct qlcnic_bc_hdr *hdr;
 753        int i;
 754        u32 num_regs, bc_pay_sz;
 755        u16 remainder;
 756        u8 cmd_op, num_frags, t_num_frags;
 757
 758        bc_pay_sz = QLC_BC_PAYLOAD_SZ;
 759        if (msg_type == QLC_BC_COMMAND) {
 760                trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
 761                trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
 762                num_regs = cmd->req.num;
 763                trans->req_pay_size = (num_regs * 4);
 764                num_regs = cmd->rsp.num;
 765                trans->rsp_pay_size = (num_regs * 4);
 766                cmd_op = cmd->req.arg[0] & 0xff;
 767                remainder = (trans->req_pay_size) % (bc_pay_sz);
 768                num_frags = (trans->req_pay_size) / (bc_pay_sz);
 769                if (remainder)
 770                        num_frags++;
 771                t_num_frags = num_frags;
 772                if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
 773                        return -ENOMEM;
 774                remainder = (trans->rsp_pay_size) % (bc_pay_sz);
 775                num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
 776                if (remainder)
 777                        num_frags++;
 778                if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
 779                        return -ENOMEM;
 780                num_frags  = t_num_frags;
 781                hdr = trans->req_hdr;
 782        }  else {
 783                cmd->req.arg = (u32 *)trans->req_pay;
 784                cmd->rsp.arg = (u32 *)trans->rsp_pay;
 785                cmd_op = cmd->req.arg[0] & 0xff;
 786                cmd->cmd_op = cmd_op;
 787                remainder = (trans->rsp_pay_size) % (bc_pay_sz);
 788                num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
 789                if (remainder)
 790                        num_frags++;
 791                cmd->req.num = trans->req_pay_size / 4;
 792                cmd->rsp.num = trans->rsp_pay_size / 4;
 793                hdr = trans->rsp_hdr;
 794                cmd->op_type = trans->req_hdr->op_type;
 795        }
 796
 797        trans->trans_id = seq;
 798        trans->cmd_id = cmd_op;
 799        for (i = 0; i < num_frags; i++) {
 800                hdr[i].version = 2;
 801                hdr[i].msg_type = msg_type;
 802                hdr[i].op_type = cmd->op_type;
 803                hdr[i].num_cmds = 1;
 804                hdr[i].num_frags = num_frags;
 805                hdr[i].frag_num = i + 1;
 806                hdr[i].cmd_op = cmd_op;
 807                hdr[i].seq_id = seq;
 808        }
 809        return 0;
 810}
 811
 812static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
 813{
 814        if (!trans)
 815                return;
 816        kfree(trans->req_hdr);
 817        kfree(trans->rsp_hdr);
 818        kfree(trans);
 819}
 820
 821static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
 822                                    struct qlcnic_bc_trans *trans, u8 type)
 823{
 824        struct qlcnic_trans_list *t_list;
 825        unsigned long flags;
 826        int ret = 0;
 827
 828        if (type == QLC_BC_RESPONSE) {
 829                t_list = &vf->rcv_act;
 830                spin_lock_irqsave(&t_list->lock, flags);
 831                t_list->count--;
 832                list_del(&trans->list);
 833                if (t_list->count > 0)
 834                        ret = 1;
 835                spin_unlock_irqrestore(&t_list->lock, flags);
 836        }
 837        if (type == QLC_BC_COMMAND) {
 838                while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
 839                        msleep(100);
 840                vf->send_cmd = NULL;
 841                clear_bit(QLC_BC_VF_SEND, &vf->state);
 842        }
 843        return ret;
 844}
 845
 846static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
 847                                         struct qlcnic_vf_info *vf,
 848                                         work_func_t func)
 849{
 850        if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
 851            vf->adapter->need_fw_reset)
 852                return;
 853
 854        queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
 855}
 856
 857static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
 858{
 859        struct completion *cmpl = &trans->resp_cmpl;
 860
 861        if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
 862                trans->trans_state = QLC_END;
 863        else
 864                trans->trans_state = QLC_ABORT;
 865
 866        return;
 867}
 868
 869static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
 870                                            u8 type)
 871{
 872        if (type == QLC_BC_RESPONSE) {
 873                trans->curr_rsp_frag++;
 874                if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
 875                        trans->trans_state = QLC_INIT;
 876                else
 877                        trans->trans_state = QLC_END;
 878        } else {
 879                trans->curr_req_frag++;
 880                if (trans->curr_req_frag < trans->req_hdr->num_frags)
 881                        trans->trans_state = QLC_INIT;
 882                else
 883                        trans->trans_state = QLC_WAIT_FOR_RESP;
 884        }
 885}
 886
 887static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
 888                                               u8 type)
 889{
 890        struct qlcnic_vf_info *vf = trans->vf;
 891        struct completion *cmpl = &vf->ch_free_cmpl;
 892
 893        if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
 894                trans->trans_state = QLC_ABORT;
 895                return;
 896        }
 897
 898        clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
 899        qlcnic_sriov_handle_multi_frags(trans, type);
 900}
 901
 902static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
 903                                     u32 *hdr, u32 *pay, u32 size)
 904{
 905        struct qlcnic_hardware_context *ahw = adapter->ahw;
 906        u8 i, max = 2, hdr_size, j;
 907
 908        hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
 909        max = (size / sizeof(u32)) + hdr_size;
 910
 911        for (i = 2, j = 0; j < hdr_size; i++, j++)
 912                *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
 913        for (; j < max; i++, j++)
 914                *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
 915}
 916
 917static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
 918{
 919        int ret = -EBUSY;
 920        u32 timeout = 10000;
 921
 922        do {
 923                if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
 924                        ret = 0;
 925                        break;
 926                }
 927                mdelay(1);
 928        } while (--timeout);
 929
 930        return ret;
 931}
 932
 933static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
 934{
 935        struct qlcnic_vf_info *vf = trans->vf;
 936        u32 pay_size;
 937        u32 *hdr, *pay;
 938        int ret;
 939        u8 pci_func = trans->func_id;
 940
 941        if (__qlcnic_sriov_issue_bc_post(vf))
 942                return -EBUSY;
 943
 944        if (type == QLC_BC_COMMAND) {
 945                hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
 946                pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
 947                pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
 948                                                       trans->curr_req_frag);
 949                pay_size = (pay_size / sizeof(u32));
 950        } else {
 951                hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
 952                pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
 953                pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
 954                                                       trans->curr_rsp_frag);
 955                pay_size = (pay_size / sizeof(u32));
 956        }
 957
 958        ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
 959                                       pci_func, pay_size);
 960        return ret;
 961}
 962
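/* Drive the back-channel transmit state machine: post one fragment at a
 * time, waiting for the channel-free event between fragments, until the
 * transaction ends or aborts (an FLR or a pending FW reset forces an abort).
 */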
 963static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
 964                                      struct qlcnic_vf_info *vf, u8 type)
 965{
 966        bool flag = true;
 967        int err = -EIO;
 968
 969        while (flag) {
 970                if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
 971                    vf->adapter->need_fw_reset)
 972                        trans->trans_state = QLC_ABORT;
 973
 974                switch (trans->trans_state) {
 975                case QLC_INIT:
 976                        trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
 977                        if (qlcnic_sriov_issue_bc_post(trans, type))
 978                                trans->trans_state = QLC_ABORT;
 979                        break;
 980                case QLC_WAIT_FOR_CHANNEL_FREE:
 981                        qlcnic_sriov_wait_for_channel_free(trans, type);
 982                        break;
 983                case QLC_WAIT_FOR_RESP:
 984                        qlcnic_sriov_wait_for_resp(trans);
 985                        break;
 986                case QLC_END:
 987                        err = 0;
 988                        flag = false;
 989                        break;
 990                case QLC_ABORT:
 991                        err = -EIO;
 992                        flag = false;
 993                        clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
 994                        break;
 995                default:
 996                        err = -EIO;
 997                        flag = false;
 998                }
 999        }
1000        return err;
1001}
1002
1003static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
1004                                    struct qlcnic_bc_trans *trans, int pci_func)
1005{
1006        struct qlcnic_vf_info *vf;
1007        int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
1008
1009        if (index < 0)
1010                return -EIO;
1011
1012        vf = &adapter->ahw->sriov->vf_info[index];
1013        trans->vf = vf;
1014        trans->func_id = pci_func;
1015
1016        if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
1017                if (qlcnic_sriov_pf_check(adapter))
1018                        return -EIO;
1019                if (qlcnic_sriov_vf_check(adapter) &&
1020                    trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
1021                        return -EIO;
1022        }
1023
1024        mutex_lock(&vf->send_cmd_lock);
1025        vf->send_cmd = trans;
1026        err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
1027        qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
1028        mutex_unlock(&vf->send_cmd_lock);
1029        return err;
1030}
1031
1032static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
1033                                          struct qlcnic_bc_trans *trans,
1034                                          struct qlcnic_cmd_args *cmd)
1035{
1036#ifdef CONFIG_QLCNIC_SRIOV
1037        if (qlcnic_sriov_pf_check(adapter)) {
1038                qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
1039                return;
1040        }
1041#endif
1042        cmd->rsp.arg[0] |= (0x9 << 25);
1043        return;
1044}
1045
1046static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
1047{
1048        struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
1049                                                 trans_work);
1050        struct qlcnic_bc_trans *trans = NULL;
1051        struct qlcnic_adapter *adapter  = vf->adapter;
1052        struct qlcnic_cmd_args cmd;
1053        u8 req;
1054
1055        if (adapter->need_fw_reset)
1056                return;
1057
1058        if (test_bit(QLC_BC_VF_FLR, &vf->state))
1059                return;
1060
1061        memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
1062        trans = list_first_entry(&vf->rcv_act.wait_list,
1063                                 struct qlcnic_bc_trans, list);
1064        adapter = vf->adapter;
1065
1066        if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
1067                                        QLC_BC_RESPONSE))
1068                goto cleanup_trans;
1069
1070        __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
1071        trans->trans_state = QLC_INIT;
1072        __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
1073
1074cleanup_trans:
1075        qlcnic_free_mbx_args(&cmd);
1076        req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
1077        qlcnic_sriov_cleanup_transaction(trans);
1078        if (req)
1079                qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
1080                                             qlcnic_sriov_process_bc_cmd);
1081}
1082
1083static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
1084                                        struct qlcnic_vf_info *vf)
1085{
1086        struct qlcnic_bc_trans *trans;
1087        u32 pay_size;
1088
1089        if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
1090                return;
1091
1092        trans = vf->send_cmd;
1093
1094        if (trans == NULL)
1095                goto clear_send;
1096
1097        if (trans->trans_id != hdr->seq_id)
1098                goto clear_send;
1099
1100        pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
1101                                               trans->curr_rsp_frag);
1102        qlcnic_sriov_pull_bc_msg(vf->adapter,
1103                                 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
1104                                 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
1105                                 pay_size);
1106        if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
1107                goto clear_send;
1108
1109        complete(&trans->resp_cmpl);
1110
1111clear_send:
1112        clear_bit(QLC_BC_VF_SEND, &vf->state);
1113}
1114
1115int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1116                                struct qlcnic_vf_info *vf,
1117                                struct qlcnic_bc_trans *trans)
1118{
1119        struct qlcnic_trans_list *t_list = &vf->rcv_act;
1120
1121        t_list->count++;
1122        list_add_tail(&trans->list, &t_list->wait_list);
1123        if (t_list->count == 1)
1124                qlcnic_sriov_schedule_bc_cmd(sriov, vf,
1125                                             qlcnic_sriov_process_bc_cmd);
1126        return 0;
1127}
1128
1129static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1130                                     struct qlcnic_vf_info *vf,
1131                                     struct qlcnic_bc_trans *trans)
1132{
1133        struct qlcnic_trans_list *t_list = &vf->rcv_act;
1134
1135        spin_lock(&t_list->lock);
1136
1137        __qlcnic_sriov_add_act_list(sriov, vf, trans);
1138
1139        spin_unlock(&t_list->lock);
1140        return 0;
1141}
1142
1143static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
1144                                              struct qlcnic_vf_info *vf,
1145                                              struct qlcnic_bc_hdr *hdr)
1146{
1147        struct qlcnic_bc_trans *trans = NULL;
1148        struct list_head *node;
1149        u32 pay_size, curr_frag;
1150        u8 found = 0, active = 0;
1151
1152        spin_lock(&vf->rcv_pend.lock);
1153        if (vf->rcv_pend.count > 0) {
1154                list_for_each(node, &vf->rcv_pend.wait_list) {
1155                        trans = list_entry(node, struct qlcnic_bc_trans, list);
1156                        if (trans->trans_id == hdr->seq_id) {
1157                                found = 1;
1158                                break;
1159                        }
1160                }
1161        }
1162
1163        if (found) {
1164                curr_frag = trans->curr_req_frag;
1165                pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
1166                                                       curr_frag);
1167                qlcnic_sriov_pull_bc_msg(vf->adapter,
1168                                         (u32 *)(trans->req_hdr + curr_frag),
1169                                         (u32 *)(trans->req_pay + curr_frag),
1170                                         pay_size);
1171                trans->curr_req_frag++;
1172                if (trans->curr_req_frag >= hdr->num_frags) {
1173                        vf->rcv_pend.count--;
1174                        list_del(&trans->list);
1175                        active = 1;
1176                }
1177        }
1178        spin_unlock(&vf->rcv_pend.lock);
1179
1180        if (active)
1181                if (qlcnic_sriov_add_act_list(sriov, vf, trans))
1182                        qlcnic_sriov_cleanup_transaction(trans);
1183
1184        return;
1185}
1186
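/* Handle a received back-channel command. The first fragment allocates a
 * new transaction and pulls its payload from the mailbox; continuation
 * fragments (frag_num > 1) are matched against the pending list. Once all
 * fragments have arrived the transaction is moved to the active list.
 */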
1187static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
1188                                       struct qlcnic_bc_hdr *hdr,
1189                                       struct qlcnic_vf_info *vf)
1190{
1191        struct qlcnic_bc_trans *trans;
1192        struct qlcnic_adapter *adapter = vf->adapter;
1193        struct qlcnic_cmd_args cmd;
1194        u32 pay_size;
1195        int err;
1196        u8 cmd_op;
1197
1198        if (adapter->need_fw_reset)
1199                return;
1200
1201        if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
1202            hdr->op_type != QLC_BC_CMD &&
1203            hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
1204                return;
1205
1206        if (hdr->frag_num > 1) {
1207                qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
1208                return;
1209        }
1210
1211        memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
1212        cmd_op = hdr->cmd_op;
1213        if (qlcnic_sriov_alloc_bc_trans(&trans))
1214                return;
1215
1216        if (hdr->op_type == QLC_BC_CMD)
1217                err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
1218        else
1219                err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
1220
1221        if (err) {
1222                qlcnic_sriov_cleanup_transaction(trans);
1223                return;
1224        }
1225
1226        cmd.op_type = hdr->op_type;
1227        if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
1228                                        QLC_BC_COMMAND)) {
1229                qlcnic_free_mbx_args(&cmd);
1230                qlcnic_sriov_cleanup_transaction(trans);
1231                return;
1232        }
1233
1234        pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
1235                                         trans->curr_req_frag);
1236        qlcnic_sriov_pull_bc_msg(vf->adapter,
1237                                 (u32 *)(trans->req_hdr + trans->curr_req_frag),
1238                                 (u32 *)(trans->req_pay + trans->curr_req_frag),
1239                                 pay_size);
1240        trans->func_id = vf->pci_func;
1241        trans->vf = vf;
1242        trans->trans_id = hdr->seq_id;
1243        trans->curr_req_frag++;
1244
1245        if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
1246                return;
1247
1248        if (trans->curr_req_frag == trans->req_hdr->num_frags) {
1249                if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
1250                        qlcnic_free_mbx_args(&cmd);
1251                        qlcnic_sriov_cleanup_transaction(trans);
1252                }
1253        } else {
1254                spin_lock(&vf->rcv_pend.lock);
1255                list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
1256                vf->rcv_pend.count++;
1257                spin_unlock(&vf->rcv_pend.lock);
1258        }
1259}
1260
1261static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
1262                                          struct qlcnic_vf_info *vf)
1263{
1264        struct qlcnic_bc_hdr hdr;
1265        u32 *ptr = (u32 *)&hdr;
1266        u8 msg_type, i;
1267
1268        for (i = 2; i < 6; i++)
1269                ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
1270        msg_type = hdr.msg_type;
1271
1272        switch (msg_type) {
1273        case QLC_BC_COMMAND:
1274                qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
1275                break;
1276        case QLC_BC_RESPONSE:
1277                qlcnic_sriov_handle_bc_resp(&hdr, vf);
1278                break;
1279        }
1280}
1281
1282static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
1283                                          struct qlcnic_vf_info *vf)
1284{
1285        struct qlcnic_adapter *adapter = vf->adapter;
1286
1287        if (qlcnic_sriov_pf_check(adapter))
1288                qlcnic_sriov_pf_handle_flr(sriov, vf);
1289        else
1290                dev_err(&adapter->pdev->dev,
1291                        "Invalid event to VF. VF should not get FLR event\n");
1292}
1293
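/* Dispatch a back-channel event to the target VF: complete a channel-free
 * wait, hand FLR events to the PF handler, or process an incoming message.
 */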
1294void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
1295{
1296        struct qlcnic_vf_info *vf;
1297        struct qlcnic_sriov *sriov;
1298        int index;
1299        u8 pci_func;
1300
1301        sriov = adapter->ahw->sriov;
1302        pci_func = qlcnic_sriov_target_func_id(event);
1303        index = qlcnic_sriov_func_to_index(adapter, pci_func);
1304
1305        if (index < 0)
1306                return;
1307
1308        vf = &sriov->vf_info[index];
1309        vf->pci_func = pci_func;
1310
1311        if (qlcnic_sriov_channel_free_check(event))
1312                complete(&vf->ch_free_cmpl);
1313
1314        if (qlcnic_sriov_flr_check(event)) {
1315                qlcnic_sriov_handle_flr_event(sriov, vf);
1316                return;
1317        }
1318
1319        if (qlcnic_sriov_bc_msg_check(event))
1320                qlcnic_sriov_handle_msg_event(sriov, vf);
1321}
1322
1323int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1324{
1325        struct qlcnic_cmd_args cmd;
1326        int err;
1327
1328        if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
1329                return 0;
1330
1331        if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
1332                return -ENOMEM;
1333
1334        if (enable)
1335                cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1336
1337        err = qlcnic_83xx_issue_cmd(adapter, &cmd);
1338
1339        if (err != QLCNIC_RCODE_SUCCESS) {
1340                dev_err(&adapter->pdev->dev,
1341                        "Failed to %s bc events, err=%d\n",
1342                        (enable ? "enable" : "disable"), err);
1343        }
1344
1345        qlcnic_free_mbx_args(&cmd);
1346        return err;
1347}
1348
1349static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1350                                     struct qlcnic_bc_trans *trans)
1351{
1352        u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
1353        u32 state;
1354
1355        state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1356        if (state == QLC_83XX_IDC_DEV_READY) {
1357                msleep(20);
1358                clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
1359                trans->trans_state = QLC_INIT;
1360                if (++adapter->fw_fail_cnt > max)
1361                        return -EIO;
1362                else
1363                        return 0;
1364        }
1365
1366        return -EIO;
1367}
1368
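/* Send a mailbox command to the PF as a back-channel transaction. A timed
 * out CHANNEL_INIT is retried a limited number of times while the device
 * reports READY; an unrecovered timeout marks the mailbox not ready and
 * requests a firmware reset.
 */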
1369static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1370                                  struct qlcnic_cmd_args *cmd)
1371{
1372        struct qlcnic_hardware_context *ahw = adapter->ahw;
1373        struct qlcnic_mailbox *mbx = ahw->mailbox;
1374        struct device *dev = &adapter->pdev->dev;
1375        struct qlcnic_bc_trans *trans;
1376        int err;
1377        u32 rsp_data, opcode, mbx_err_code, rsp;
1378        u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
1379        u8 func = ahw->pci_func;
1380
1381        rsp = qlcnic_sriov_alloc_bc_trans(&trans);
1382        if (rsp)
1383                goto free_cmd;
1384
1385        rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
1386        if (rsp)
1387                goto cleanup_transaction;
1388
1389retry:
1390        if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
1391                rsp = -EIO;
1392                QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1393                      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
1394                goto err_out;
1395        }
1396
1397        err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
1398        if (err) {
1399                dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
1400                        (cmd->req.arg[0] & 0xffff), func);
1401                rsp = QLCNIC_RCODE_TIMEOUT;
1402
1403                /* After adapter reset PF driver may take some time to
1404                 * respond to VF's request. Retry request till maximum retries.
1405                 */
1406                if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
1407                    !qlcnic_sriov_retry_bc_cmd(adapter, trans))
1408                        goto retry;
1409
1410                goto err_out;
1411        }
1412
1413        rsp_data = cmd->rsp.arg[0];
1414        mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
1415        opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
1416
1417        if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
1418            (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
1419                rsp = QLCNIC_RCODE_SUCCESS;
1420        } else {
1421                if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
1422                        rsp = QLCNIC_RCODE_SUCCESS;
1423                } else {
1424                        rsp = mbx_err_code;
1425                        if (!rsp)
1426                                rsp = 1;
1427
1428                        dev_err(dev,
1429                                "MBX command 0x%x failed with err:0x%x for VF %d\n",
1430                                opcode, mbx_err_code, func);
1431                }
1432        }
1433
1434err_out:
1435        if (rsp == QLCNIC_RCODE_TIMEOUT) {
1436                ahw->reset_context = 1;
1437                adapter->need_fw_reset = 1;
1438                clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1439        }
1440
1441cleanup_transaction:
1442        qlcnic_sriov_cleanup_transaction(trans);
1443
1444free_cmd:
1445        if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
1446                qlcnic_free_mbx_args(cmd);
1447                kfree(cmd);
1448        }
1449
1450        return rsp;
1451}
1453
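    /* .mbx_cmd entry point for SR-IOV VFs: fire-and-forget
     * (QLC_83XX_MBX_CMD_NO_WAIT) commands are deferred to the async work
     * queue, everything else is issued synchronously.
     */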
1454static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1455                                  struct qlcnic_cmd_args *cmd)
1456{
1457        if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
1458                return qlcnic_sriov_async_issue_cmd(adapter, cmd);
1459        else
1460                return __qlcnic_sriov_issue_cmd(adapter, cmd);
1461}
1462
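    /* Send a back-channel CHANNEL_INIT/CHANNEL_TERM request to the PF and
     * track the resulting channel state in the VF state bits. A response
     * with the upper bits (arg[0] >> 25) equal to 2 appears to mean the PF
     * could not honour the request yet; it is passed back to the caller
     * as 2 without touching the channel state.
     */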
1463static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1464{
1465        struct qlcnic_cmd_args cmd;
1466        struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1467        int ret;
1468
1469        memset(&cmd, 0, sizeof(cmd));
1470        if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1471                return -ENOMEM;
1472
1473        ret = qlcnic_issue_cmd(adapter, &cmd);
1474        if (ret) {
1475                dev_err(&adapter->pdev->dev,
1476                        "Failed bc channel %s, err=%d\n", cmd_op ? "term" : "init",
1477                        ret);
1478                goto out;
1479        }
1480
1481        cmd_op = (cmd.rsp.arg[0] & 0xff);
1482        if (cmd.rsp.arg[0] >> 25 == 2) {
                    /* free the mbx args via the common exit path */
                    ret = 2;
                    goto out;
            }
1484        if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
1485                set_bit(QLC_BC_VF_STATE, &vf->state);
1486        else
1487                clear_bit(QLC_BC_VF_STATE, &vf->state);
1488
1489out:
1490        qlcnic_free_mbx_args(&cmd);
1491        return ret;
1492}
1493
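    /* Program a MAC filter for the VF. If no guest VLAN is configured the
     * address is added untagged; otherwise it is added once per configured
     * sriov VLAN (and additionally untagged on 84xx adapters).
     */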
1494static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
1495                                  enum qlcnic_mac_type mac_type)
1496{
1497        struct qlcnic_adapter *adapter = netdev_priv(netdev);
1498        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1499        struct qlcnic_vf_info *vf;
1500        u16 vlan_id;
1501        int i;
1502
1503        vf = &adapter->ahw->sriov->vf_info[0];
1504
1505        if (!qlcnic_sriov_check_any_vlan(vf)) {
1506                qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
1507        } else {
1508                spin_lock(&vf->vlan_list_lock);
1509                for (i = 0; i < sriov->num_allowed_vlans; i++) {
1510                        vlan_id = vf->sriov_vlans[i];
1511                        if (vlan_id)
1512                                qlcnic_nic_add_mac(adapter, mac, vlan_id,
1513                                                   mac_type);
1514                }
1515                spin_unlock(&vf->vlan_list_lock);
1516                if (qlcnic_84xx_check(adapter))
1517                        qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
1518        }
1519}
1520
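    /* Flush the async back-channel worker and drop any queued commands
     * that were never issued, freeing the pending entries.
     */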
1521void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1522{
1523        struct list_head *head = &bc->async_cmd_list;
1524        struct qlcnic_async_cmd *entry;
1525
1526        flush_workqueue(bc->bc_async_wq);
1527        cancel_work_sync(&bc->vf_async_work);
1528
1529        spin_lock(&bc->queue_lock);
1530        while (!list_empty(head)) {
1531                entry = list_entry(head->next, struct qlcnic_async_cmd,
1532                                   list);
1533                list_del(&entry->list);
1534                kfree(entry->cmd);
1535                kfree(entry);
1536        }
1537        spin_unlock(&bc->queue_lock);
1538}
1539
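    /* Rebuild the VF's Rx filter configuration: choose the VPORT miss mode
     * from IFF_PROMISC/IFF_ALLMULTI and the available filter space, refresh
     * the broadcast/multicast/unicast MAC filters as needed, and enable
     * driver MAC learning when falling back to accept-all mode.
     */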
1540void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1541{
1542        struct qlcnic_adapter *adapter = netdev_priv(netdev);
1543        struct qlcnic_hardware_context *ahw = adapter->ahw;
1544        static const u8 bcast_addr[ETH_ALEN] = {
1545                0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1546        };
1547        struct netdev_hw_addr *ha;
1548        u32 mode = VPORT_MISS_MODE_DROP;
1549
1550        if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
1551                return;
1552
1553        if (netdev->flags & IFF_PROMISC) {
1554                if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
1555                        mode = VPORT_MISS_MODE_ACCEPT_ALL;
1556        } else if ((netdev->flags & IFF_ALLMULTI) ||
1557                   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
1558                mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1559        } else {
1560                qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC);
1561                if (!netdev_mc_empty(netdev)) {
1562                        qlcnic_flush_mcast_mac(adapter);
1563                        netdev_for_each_mc_addr(ha, netdev)
1564                                qlcnic_vf_add_mc_list(netdev, ha->addr,
1565                                                      QLCNIC_MULTICAST_MAC);
1566                }
1567        }
1568
1569        /* Configure the unicast MAC addresses; if there is not enough
1570         * filter space to store all of them, enable promiscuous mode.
1571         */
1572        if (netdev_uc_count(netdev) > ahw->max_uc_count) {
1573                mode = VPORT_MISS_MODE_ACCEPT_ALL;
1574        } else if (!netdev_uc_empty(netdev)) {
1575                netdev_for_each_uc_addr(ha, netdev)
1576                        qlcnic_vf_add_mc_list(netdev, ha->addr,
1577                                              QLCNIC_UNICAST_MAC);
1578        }
1579
1580        if (adapter->pdev->is_virtfn) {
1581                if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
1582                    !adapter->fdb_mac_learn) {
1583                        qlcnic_alloc_lb_filters_mem(adapter);
1584                        adapter->drv_mac_learn = true;
1585                        adapter->rx_mac_learn = true;
1586                } else {
1587                        adapter->drv_mac_learn = false;
1588                        adapter->rx_mac_learn = false;
1589                }
1590        }
1591
1592        qlcnic_nic_set_promisc(adapter, mode);
1593}
1594
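    /* Worker for the VF async command queue: detach the pending list under
     * the queue lock, then issue each command synchronously outside the
     * lock. The work is requeued if new commands arrived while draining.
     */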
1595static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
1596{
1597        struct qlcnic_async_cmd *entry, *tmp;
1598        struct qlcnic_back_channel *bc;
1599        struct qlcnic_cmd_args *cmd;
1600        struct list_head *head;
1601        LIST_HEAD(del_list);
1602
1603        bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
1604        head = &bc->async_cmd_list;
1605
1606        spin_lock(&bc->queue_lock);
1607        list_splice_init(head, &del_list);
1608        spin_unlock(&bc->queue_lock);
1609
1610        list_for_each_entry_safe(entry, tmp, &del_list, list) {
1611                list_del(&entry->list);
1612                cmd = entry->cmd;
1613                __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
1614                kfree(entry);
1615        }
1616
1617        if (!list_empty(head))
1618                queue_work(bc->bc_async_wq, &bc->vf_async_work);
1621}
1622
1623static struct qlcnic_async_cmd *
1624qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
1625                             struct qlcnic_cmd_args *cmd)
1626{
1627        struct qlcnic_async_cmd *entry = NULL;
1628
1629        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1630        if (!entry)
1631                return NULL;
1632
1633        entry->cmd = cmd;
1634
1635        spin_lock(&bc->queue_lock);
1636        list_add_tail(&entry->list, &bc->async_cmd_list);
1637        spin_unlock(&bc->queue_lock);
1638
1639        return entry;
1640}
1641
1642static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
1643                                            struct qlcnic_cmd_args *cmd)
1644{
1645        struct qlcnic_async_cmd *entry = NULL;
1646
1647        entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
1648        if (!entry) {
1649                qlcnic_free_mbx_args(cmd);
1650                kfree(cmd);
1651                return;
1652        }
1653
1654        queue_work(bc->bc_async_wq, &bc->vf_async_work);
1655}
1656
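    /* Queue a fire-and-forget mailbox command for the async worker. The
     * command is rejected outright while a FW reset is pending; if the
     * queue entry cannot be allocated, the command and its mailbox args
     * are freed immediately.
     */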
1657static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
1658                                        struct qlcnic_cmd_args *cmd)
1659{
1661        struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1662
1663        if (adapter->need_fw_reset)
1664                return -EIO;
1665
1666        qlcnic_sriov_schedule_async_cmd(bc, cmd);
1667
1668        return 0;
1669}
1670
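    /* Bring the VF driver back up after a FW/adapter reset: restart the
     * mailbox work, re-enable mailbox interrupts, reconfigure the back
     * channel interrupt, re-establish the channel and re-initialize the
     * VF driver state. Partial failures are unwound in reverse order.
     */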
1671static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1672{
1673        int err;
1674
1675        adapter->need_fw_reset = 0;
1676        qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
1677        qlcnic_83xx_enable_mbx_interrupt(adapter);
1678
1679        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1680        if (err)
1681                return err;
1682
1683        err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
1684        if (err)
1685                goto err_out_cleanup_bc_intr;
1686
1687        err = qlcnic_sriov_vf_init_driver(adapter);
1688        if (err)
1689                goto err_out_term_channel;
1690
1691        return 0;
1692
1693err_out_term_channel:
1694        qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
1695
1696err_out_cleanup_bc_intr:
1697        qlcnic_sriov_cfg_bc_intr(adapter, 0);
1698        return err;
1699}
1700
1701static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
1702{
1703        struct net_device *netdev = adapter->netdev;
1704
1705        if (netif_running(netdev)) {
1706                if (!qlcnic_up(adapter, netdev))
1707                        qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1708        }
1709
1710        netif_device_attach(netdev);
1711}
1712
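    /* Quiesce the VF: detach the netdev, stop mailbox processing and
     * interrupts, bring the interface down and reset the interrupt table
     * entries so that a later attach starts from a clean state.
     */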
1713static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
1714{
1715        struct qlcnic_hardware_context *ahw = adapter->ahw;
1716        struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
1717        struct net_device *netdev = adapter->netdev;
1718        u8 i, max_ints = ahw->num_msix - 1;
1719
1720        netif_device_detach(netdev);
1721        qlcnic_83xx_detach_mailbox_work(adapter);
1722        qlcnic_83xx_disable_mbx_intr(adapter);
1723
1724        if (netif_running(netdev))
1725                qlcnic_down(adapter, netdev);
1726
1727        for (i = 0; i < max_ints; i++) {
1728                intr_tbl[i].id = i;
1729                intr_tbl[i].enabled = 0;
1730                intr_tbl[i].src = 0;
1731        }
1732        ahw->reset_context = 0;
1733}
1734
1735static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1736{
1737        struct qlcnic_hardware_context *ahw = adapter->ahw;
1738        struct device *dev = &adapter->pdev->dev;
1739        struct qlc_83xx_idc *idc = &ahw->idc;
1740        u8 func = ahw->pci_func;
1741        u32 state;
1742
1743        if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
1744            (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
1745                if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1746                        qlcnic_sriov_vf_attach(adapter);
1747                        adapter->fw_fail_cnt = 0;
1748                        dev_info(dev,
1749                                 "%s: Reinitialization of VF 0x%x done after FW reset\n",
1750                                 __func__, func);
1751                } else {
1752                        dev_err(dev,
1753                                "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1754                                __func__, func);
1755                        state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1756                        dev_info(dev, "Current state 0x%x after FW reset\n",
1757                                 state);
1758                }
1759        }
1760
1761        return 0;
1762}
1763
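    /* Handle a context reset request. The first two attempts only flag a
     * FW reset and wait to see whether the firmware is really in a failed
     * state; once QLC_83XX_VF_RESET_FAIL_THRESH is crossed the VF is
     * detached for good. In between, the VF is detached and re-initialized.
     */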
1764static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
1765{
1766        struct qlcnic_hardware_context *ahw = adapter->ahw;
1767        struct qlcnic_mailbox *mbx = ahw->mailbox;
1768        struct device *dev = &adapter->pdev->dev;
1769        struct qlc_83xx_idc *idc = &ahw->idc;
1770        u8 func = ahw->pci_func;
1771        u32 state;
1772
1773        adapter->reset_ctx_cnt++;
1774
1775        /* Skip the context reset and check if FW is hung */
1776        if (adapter->reset_ctx_cnt < 3) {
1777                adapter->need_fw_reset = 1;
1778                clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1779                dev_info(dev,
1780                         "Resetting context, wait here to check if FW is in failed state\n");
1781                return 0;
1782        }
1783
1784        /* If the number of context resets exceeds the threshold,
1785         * give up and fail the VF.
1786         */
1787        if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
1788                clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1789                adapter->tx_timeo_cnt = 0;
1790                adapter->fw_fail_cnt = 0;
1791                adapter->reset_ctx_cnt = 0;
1792                qlcnic_sriov_vf_detach(adapter);
1793                dev_err(dev,
1794                        "Device context resets have exceeded the threshold, device interface will be shutdown\n");
1795                return -EIO;
1796        }
1797
1798        dev_info(dev, "Resetting context of VF 0x%x\n", func);
1799        dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
1800                 __func__, adapter->reset_ctx_cnt, func);
1801        set_bit(__QLCNIC_RESETTING, &adapter->state);
1802        adapter->need_fw_reset = 1;
1803        clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1804        qlcnic_sriov_vf_detach(adapter);
1805        adapter->need_fw_reset = 0;
1806
1807        if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1808                qlcnic_sriov_vf_attach(adapter);
1809                adapter->tx_timeo_cnt = 0;
1810                adapter->reset_ctx_cnt = 0;
1811                adapter->fw_fail_cnt = 0;
1812                dev_info(dev, "Done resetting context for VF 0x%x\n", func);
1813        } else {
1814                dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
1815                        __func__, func);
1816                state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1817                dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
1818        }
1819
1820        return 0;
1821}
1822
1823static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
1824{
1825        struct qlcnic_hardware_context *ahw = adapter->ahw;
1826        int ret = 0;
1827
1828        if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
1829                ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
1830        else if (ahw->reset_context)
1831                ret = qlcnic_sriov_vf_handle_context_reset(adapter);
1832
1833        clear_bit(__QLCNIC_RESETTING, &adapter->state);
1834        return ret;
1835}
1836
1837static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
1838{
1839        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1840
1841        dev_err(&adapter->pdev->dev, "Device is in failed state\n");
1842        if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
1843                qlcnic_sriov_vf_detach(adapter);
1844
1845        clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1846        clear_bit(__QLCNIC_RESETTING, &adapter->state);
1847        return -EIO;
1848}
1849
1850static int
1851qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1852{
1853        struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1854        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1855
1856        dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
1857        if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1858                set_bit(__QLCNIC_RESETTING, &adapter->state);
1859                adapter->tx_timeo_cnt = 0;
1860                adapter->reset_ctx_cnt = 0;
1861                clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1862                qlcnic_sriov_vf_detach(adapter);
1863        }
1864
1865        return 0;
1866}
1867
1868static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1869{
1870        struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1871        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1872        u8 func = adapter->ahw->pci_func;
1873
1874        if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1875                dev_err(&adapter->pdev->dev,
1876                        "Firmware hang detected by VF 0x%x\n", func);
1877                set_bit(__QLCNIC_RESETTING, &adapter->state);
1878                adapter->tx_timeo_cnt = 0;
1879                adapter->reset_ctx_cnt = 0;
1880                clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1881                qlcnic_sriov_vf_detach(adapter);
1882        }
1883        return 0;
1884}
1885
1886static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1887{
1888        dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
1889        return 0;
1890}
1891
1892static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
1893{
1894        if (adapter->fhash.fnum)
1895                qlcnic_prune_lb_filters(adapter);
1896}
1897
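    /* Periodic IDC poll for the VF: read the current device state from
     * firmware, dispatch to the matching state handler, run periodic
     * housekeeping and re-arm the work while the module stays loaded and
     * no handler reported a fatal error.
     */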
1898static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1899{
1900        struct qlcnic_adapter *adapter;
1901        struct qlc_83xx_idc *idc;
1902        int ret = 0;
1903
1904        adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
1905        idc = &adapter->ahw->idc;
1906        idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1907
1908        switch (idc->curr_state) {
1909        case QLC_83XX_IDC_DEV_READY:
1910                ret = qlcnic_sriov_vf_idc_ready_state(adapter);
1911                break;
1912        case QLC_83XX_IDC_DEV_NEED_RESET:
1913        case QLC_83XX_IDC_DEV_INIT:
1914                ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
1915                break;
1916        case QLC_83XX_IDC_DEV_NEED_QUISCENT:
1917                ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
1918                break;
1919        case QLC_83XX_IDC_DEV_FAILED:
1920                ret = qlcnic_sriov_vf_idc_failed_state(adapter);
1921                break;
1922        case QLC_83XX_IDC_DEV_QUISCENT:
1923                break;
1924        default:
1925                ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
1926        }
1927
1928        idc->prev_state = idc->curr_state;
1929        qlcnic_sriov_vf_periodic_tasks(adapter);
1930
1931        if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1932                qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1933                                     idc->delay);
1934}
1935
1936static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
1937{
1938        while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1939                msleep(20);
1940
1941        clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
1942        clear_bit(__QLCNIC_RESETTING, &adapter->state);
1943        cancel_delayed_work_sync(&adapter->fw_work);
1944}
1945
1946static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1947                                      struct qlcnic_vf_info *vf, u16 vlan_id)
1948{
1949        int i, err = -EINVAL;
1950
1951        if (!vf->sriov_vlans)
1952                return err;
1953
1954        spin_lock_bh(&vf->vlan_list_lock);
1955
1956        for (i = 0; i < sriov->num_allowed_vlans; i++) {
1957                if (vf->sriov_vlans[i] == vlan_id) {
1958                        err = 0;
1959                        break;
1960                }
1961        }
1962
1963        spin_unlock_bh(&vf->vlan_list_lock);
1964        return err;
1965}
1966
1967static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
1968                                           struct qlcnic_vf_info *vf)
1969{
1970        int err = 0;
1971
1972        spin_lock_bh(&vf->vlan_list_lock);
1973
1974        if (vf->num_vlan >= sriov->num_allowed_vlans)
1975                err = -EINVAL;
1976
1977        spin_unlock_bh(&vf->vlan_list_lock);
1978        return err;
1979}
1980
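    /* Validate a guest VLAN add/delete request: only allowed in guest VLAN
     * mode and within the allowed VLAN count, and, when a specific
     * allowed-VLAN list is in effect (sriov->any_vlan), only for IDs on
     * that list. 83xx VFs may hold a single VLAN at a time; deletion
     * requires the VLAN to be currently configured.
     */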
1981static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
1982                                          u16 vid, u8 enable)
1983{
1984        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1985        struct qlcnic_vf_info *vf;
1986        bool vlan_exist;
1987        u8 allowed = 0;
1988        int i;
1989
1990        vf = &adapter->ahw->sriov->vf_info[0];
1991        vlan_exist = qlcnic_sriov_check_any_vlan(vf);
1992        if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
1993                return -EINVAL;
1994
1995        if (enable) {
1996                if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
1997                        return -EINVAL;
1998
1999                if (qlcnic_sriov_validate_num_vlans(sriov, vf))
2000                        return -EINVAL;
2001
2002                if (sriov->any_vlan) {
2003                        for (i = 0; i < sriov->num_allowed_vlans; i++) {
2004                                if (sriov->allowed_vlans[i] == vid)
2005                                        allowed = 1;
2006                        }
2007
2008                        if (!allowed)
2009                                return -EINVAL;
2010                }
2011        } else {
2012                if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
2013                        return -EINVAL;
2014        }
2015
2016        return 0;
2017}
2018
2019static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
2020                                        enum qlcnic_vlan_operations opcode)
2021{
2022        struct qlcnic_adapter *adapter = vf->adapter;
2023        struct qlcnic_sriov *sriov;
2024
2025        sriov = adapter->ahw->sriov;
2026
2027        if (!vf->sriov_vlans)
2028                return;
2029
2030        spin_lock_bh(&vf->vlan_list_lock);
2031
2032        switch (opcode) {
2033        case QLC_VLAN_ADD:
2034                qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
2035                break;
2036        case QLC_VLAN_DELETE:
2037                qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
2038                break;
2039        default:
2040                netdev_err(adapter->netdev, "Invalid VLAN operation\n");
2041        }
2042
2043        spin_unlock_bh(&vf->vlan_list_lock);
2045}
2046
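    /* Configure a guest VLAN on the VF. After validation the request is
     * sent to the PF over the back channel (the async queue is flushed
     * first); on success the stale MAC filter list is dropped, the local
     * VLAN table is updated and the Rx filters are rebuilt. VLAN 0 is
     * silently ignored.
     */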
2047int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
2048                                   u16 vid, u8 enable)
2049{
2050        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2051        struct net_device *netdev = adapter->netdev;
2052        struct qlcnic_vf_info *vf;
2053        struct qlcnic_cmd_args cmd;
2054        int ret;
2055
2056        memset(&cmd, 0, sizeof(cmd));
2057        if (vid == 0)
2058                return 0;
2059
2060        vf = &adapter->ahw->sriov->vf_info[0];
2061        ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
2062        if (ret)
2063                return ret;
2064
2065        ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
2066                                             QLCNIC_BC_CMD_CFG_GUEST_VLAN);
2067        if (ret)
2068                return ret;
2069
2070        cmd.req.arg[1] = (enable & 1) | vid << 16;
2071
2072        qlcnic_sriov_cleanup_async_list(&sriov->bc);
2073        ret = qlcnic_issue_cmd(adapter, &cmd);
2074        if (ret) {
2075                dev_err(&adapter->pdev->dev,
2076                        "Failed to configure guest VLAN, err=%d\n", ret);
2077        } else {
2078                netif_addr_lock_bh(netdev);
2079                qlcnic_free_mac_list(adapter);
2080                netif_addr_unlock_bh(netdev);
2081
2082                if (enable)
2083                        qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
2084                else
2085                        qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
2086
2087                netif_addr_lock_bh(netdev);
2088                qlcnic_set_multi(netdev);
2089                netif_addr_unlock_bh(netdev);
2090        }
2091
2092        qlcnic_free_mbx_args(&cmd);
2093        return ret;
2094}
2095
2096static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
2097{
2098        struct list_head *head = &adapter->mac_list;
2099        struct qlcnic_mac_vlan_list *cur;
2100
2101        while (!list_empty(head)) {
2102                cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
2103                qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
2104                                          QLCNIC_MAC_DEL);
2105                list_del(&cur->list);
2106                kfree(cur);
2107        }
2108}
2110
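    /* PCI shutdown path for the VF: detach the netdev, cancel the IDC poll
     * work, bring the interface down, terminate the back channel and
     * disable mailbox interrupts before saving PCI state.
     */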
2111static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
2112{
2113        struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2114        struct net_device *netdev = adapter->netdev;
2115
2116        netif_device_detach(netdev);
2117        qlcnic_cancel_idc_work(adapter);
2118
2119        if (netif_running(netdev))
2120                qlcnic_down(adapter, netdev);
2121
2122        qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
2123        qlcnic_sriov_cfg_bc_intr(adapter, 0);
2124        qlcnic_83xx_disable_mbx_intr(adapter);
2125        cancel_delayed_work_sync(&adapter->idc_aen_work);
2126
2127        return pci_save_state(pdev);
2128}
2129
2130static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
2131{
2132        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
2133        struct net_device *netdev = adapter->netdev;
2134        int err;
2135
2136        set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
2137        qlcnic_83xx_enable_mbx_interrupt(adapter);
2138        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
2139        if (err)
2140                return err;
2141
2142        err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
2143        if (!err) {
2144                if (netif_running(netdev)) {
2145                        err = qlcnic_up(adapter, netdev);
2146                        if (!err)
2147                                qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2148                }
2149        }
2150
2151        netif_device_attach(netdev);
2152        qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
2153                             idc->delay);
2154        return err;
2155}
2156
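    /* Allocate the per-VF guest VLAN tables. kcalloc() failures are not
     * checked here; helpers such as qlcnic_sriov_check_vlan_id() test
     * vf->sriov_vlans for NULL before using it.
     */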
2157void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
2158{
2159        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2160        struct qlcnic_vf_info *vf;
2161        int i;
2162
2163        for (i = 0; i < sriov->num_vfs; i++) {
2164                vf = &sriov->vf_info[i];
2165                vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
2166                                          sizeof(*vf->sriov_vlans), GFP_KERNEL);
2167        }
2168}
2169
2170void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
2171{
2172        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2173        struct qlcnic_vf_info *vf;
2174        int i;
2175
2176        for (i = 0; i < sriov->num_vfs; i++) {
2177                vf = &sriov->vf_info[i];
2178                kfree(vf->sriov_vlans);
2179                vf->sriov_vlans = NULL;
2180        }
2181}
2182
2183void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
2184                              struct qlcnic_vf_info *vf, u16 vlan_id)
2185{
2186        int i;
2187
2188        for (i = 0; i < sriov->num_allowed_vlans; i++) {
2189                if (!vf->sriov_vlans[i]) {
2190                        vf->sriov_vlans[i] = vlan_id;
2191                        vf->num_vlan++;
2192                        return;
2193                }
2194        }
2195}
2196
2197void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
2198                              struct qlcnic_vf_info *vf, u16 vlan_id)
2199{
2200        int i;
2201
2202        for (i = 0; i < sriov->num_allowed_vlans; i++) {
2203                if (vf->sriov_vlans[i] == vlan_id) {
2204                        vf->sriov_vlans[i] = 0;
2205                        vf->num_vlan--;
2206                        return;
2207                }
2208        }
2209}
2210
2211bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
2212{
2213        bool err = false;
2214
2215        spin_lock_bh(&vf->vlan_list_lock);
2216
2217        if (vf->num_vlan)
2218                err = true;
2219
2220        spin_unlock_bh(&vf->vlan_list_lock);
2221        return err;
2222}
2223