linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
/* bnx2x_sriov.c: QLogic Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 * Copyright 2014 QLogic Corporation
 * All rights reserved
 *
 * Unless you and QLogic execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other QLogic software provided under a
 * license other than the GPL, without QLogic's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *             Ariel Elior <ariel.elior@qlogic.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
                            struct bnx2x_virtf **vf,
                            struct pf_vf_bulletin_content **bulletin,
                            bool test_queue);

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                         u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                        u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
        int idx;

        for_each_vf(bp, idx)
                if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
                        break;
        return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
        u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
        return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                u8 igu_sb_id, u8 segment, u16 index, u8 op,
                                u8 update)
{
        /* acking a VF sb through the PF - use the GRC */
        u32 ctl;
        u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
        u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
        u32 func_encode = vf->abs_vfid;
        u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
              func_encode << IGU_CTRL_REG_FID_SHIFT             |
              IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr_data);
        REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
        barrier();

        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
           ctl, igu_addr_ctl);
        REG_WR(bp, igu_addr_ctl, ctl);
        barrier();
}
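
/* For reference, a non-authoritative summary of the ack path above: the PF
 * cannot ack a VF's status block through its own IGU BAR view, so the
 * command is issued via the GRC command registers. The data word carries
 * the regular IGU fields (SB index, segment, update flag, enable/disable
 * op), while the control word selects the producer-update address of the
 * target SB (IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id) and stamps the VF's
 * function id, so the IGU applies the update on the VF's behalf.
 */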

static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
                                       struct bnx2x_virtf *vf,
                                       bool print_err)
{
        if (!bnx2x_leading_vfq(vf, sp_initialized)) {
                if (print_err)
                        BNX2X_ERR("Slowpath objects not yet initialized!\n");
                else
                        DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
                return false;
        }
        return true;
}

/* VFOP operations states */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              struct bnx2x_queue_init_params *init_params,
                              struct bnx2x_queue_setup_params *setup_params,
                              u16 q_idx, u16 sb_idx)
{
        DP(BNX2X_MSG_IOV,
           "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
           vf->abs_vfid,
           q_idx,
           sb_idx,
           init_params->tx.sb_cq_index,
           init_params->tx.hc_rate,
           setup_params->flags,
           setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
                            struct bnx2x_queue_init_params *init_params,
                            struct bnx2x_queue_setup_params *setup_params,
                            u16 q_idx, u16 sb_idx)
{
        struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

        DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
           "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
           vf->abs_vfid,
           q_idx,
           sb_idx,
           init_params->rx.sb_cq_index,
           init_params->rx.hc_rate,
           setup_params->gen_params.mtu,
           rxq_params->buf_sz,
           rxq_params->sge_buf_sz,
           rxq_params->max_sges_pkt,
           rxq_params->tpa_agg_sz,
           setup_params->flags,
           rxq_params->drop_flags,
           rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
                           struct bnx2x_virtf *vf,
                           struct bnx2x_vf_queue *q,
                           struct bnx2x_vf_queue_construct_params *p,
                           unsigned long q_type)
{
        struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
        struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

        /* INIT */

        /* Enable host coalescing in the transition to INIT state */
        if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

        if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

        /* FW SB ID */
        init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
        init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

        /* context */
        init_p->cxts[0] = q->cxt;

        /* SETUP */

        /* Setup-op general parameters */
        setup_p->gen_params.spcl_id = vf->sp_cl_id;
        setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
        setup_p->gen_params.fp_hsi = vf->fp_hsi;

        /* Setup-op flags:
         * collect statistics, zero statistics, local-switching, security,
         * OV for Flex10, RSS and MCAST for leading
         */
        if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

        /* for VFs, enable tx switching, bd coherency, and mac address
         * anti-spoofing
         */
        __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
        __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
        if (vf->spoofchk)
                __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
        else
                __clear_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

        /* Setup-op rx parameters */
        if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
                struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

                rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
                rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
                rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

                if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
                        rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
        }

        /* Setup-op tx parameters */
        if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
                setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
                setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
        }
}

static int bnx2x_vf_queue_create(struct bnx2x *bp,
                                 struct bnx2x_virtf *vf, int qid,
                                 struct bnx2x_vf_queue_construct_params *qctor)
{
        struct bnx2x_queue_state_params *q_params;
        int rc = 0;

        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

        /* Prepare ramrod information */
        q_params = &qctor->qstate;
        q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
        set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

        if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
            BNX2X_Q_LOGICAL_STATE_ACTIVE) {
                DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
                goto out;
        }

        /* Run Queue 'construction' ramrods */
        q_params->cmd = BNX2X_Q_CMD_INIT;
        rc = bnx2x_queue_state_change(bp, q_params);
        if (rc)
                goto out;

        memcpy(&q_params->params.setup, &qctor->prep_qsetup,
               sizeof(struct bnx2x_queue_setup_params));
        q_params->cmd = BNX2X_Q_CMD_SETUP;
        rc = bnx2x_queue_state_change(bp, q_params);
        if (rc)
                goto out;

        /* enable interrupts */
        bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
                            USTORM_ID, 0, IGU_INT_ENABLE, 0);
out:
        return rc;
}
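
/* For reference (derived from the code above, not a normative description):
 * queue construction is a two-step client state change, INIT followed by
 * SETUP, both issued with RAMROD_COMP_WAIT so the caller blocks until the
 * firmware completes each ramrod. Only after SETUP succeeds is the queue's
 * status block acked with IGU_INT_ENABLE, opening interrupts for the new
 * queue; an already-ACTIVE queue is skipped gracefully.
 */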

static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                  int qid)
{
        enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
                                       BNX2X_Q_CMD_TERMINATE,
                                       BNX2X_Q_CMD_CFC_DEL};
        struct bnx2x_queue_state_params q_params;
        int rc, i;

        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

        /* Prepare ramrod information */
        memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
        q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
        set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

        if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
            BNX2X_Q_LOGICAL_STATE_STOPPED) {
                DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
                goto out;
        }

        /* Run Queue 'destruction' ramrods */
        for (i = 0; i < ARRAY_SIZE(cmds); i++) {
                q_params.cmd = cmds[i];
                rc = bnx2x_queue_state_change(bp, &q_params);
                if (rc) {
                        BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
                        return rc;
                }
        }
out:
        /* Clean Context */
        if (bnx2x_vfq(vf, qid, cxt)) {
                bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
                bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
        }

        return 0;
}
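
/* For reference: teardown mirrors construction with three ordered ramrods -
 * HALT, TERMINATE and CFC_DEL - run to completion one by one. The CDU
 * usage/reserved bytes in the queue context are then cleared so a stale
 * context cannot be mistaken for a live one when the CID is reused.
 */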

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

        if (vf) {
                /* the first igu entry belonging to VFs of this PF */
                if (!BP_VFDB(bp)->first_vf_igu_entry)
                        BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

                /* the first igu entry belonging to this VF */
                if (!vf_sb_count(vf))
                        vf->igu_base_id = igu_sb_id;

                ++vf_sb_count(vf);
                ++vf->sb_count;
        }
        BP_VFDB(bp)->vf_sbs_pool++;
}

static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                   int qid, bool drv_only, int type)
{
        struct bnx2x_vlan_mac_ramrod_params ramrod;
        int rc;

        DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
                          (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
                          (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");

        /* Prepare ramrod params */
        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
        if (type == BNX2X_VF_FILTER_VLAN_MAC) {
                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
        } else if (type == BNX2X_VF_FILTER_MAC) {
                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
        } else {
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
        }
        ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;

        set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
        if (drv_only)
                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
        else
                set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

        /* Start deleting */
        rc = ramrod.vlan_mac_obj->delete_all(bp,
                                             ramrod.vlan_mac_obj,
                                             &ramrod.user_req.vlan_mac_flags,
                                             &ramrod.ramrod_flags);
        if (rc) {
                BNX2X_ERR("Failed to delete all %s\n",
                          (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
                          (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
                return rc;
        }

        return 0;
}

static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
                                    struct bnx2x_virtf *vf, int qid,
                                    struct bnx2x_vf_mac_vlan_filter *filter,
                                    bool drv_only)
{
        struct bnx2x_vlan_mac_ramrod_params ramrod;
        int rc;

        DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
           vf->abs_vfid, filter->add ? "Adding" : "Deleting",
           (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
           (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");

        /* Prepare ramrod params */
        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
        if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
                ramrod.user_req.u.vlan.vlan = filter->vid;
                memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
        } else if (filter->type == BNX2X_VF_FILTER_VLAN) {
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
                ramrod.user_req.u.vlan.vlan = filter->vid;
        } else {
                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
                memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
        }
        ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
                                            BNX2X_VLAN_MAC_DEL;

        set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
        if (drv_only)
                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
        else
                set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

        /* Add/Remove the filter */
        rc = bnx2x_config_vlan_mac(bp, &ramrod);
        if (rc == -EEXIST)
                return 0;
        if (rc) {
                BNX2X_ERR("Failed to %s %s\n",
                          filter->add ? "add" : "delete",
                          (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
                                "VLAN-MAC" :
                          (filter->type == BNX2X_VF_FILTER_MAC) ?
                                "MAC" : "VLAN");
                return rc;
        }

        filter->applied = true;

        return 0;
}

int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                  struct bnx2x_vf_mac_vlan_filters *filters,
                                  int qid, bool drv_only)
{
        int rc = 0, i;

        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
                return -EINVAL;

        /* Prepare ramrod params */
        for (i = 0; i < filters->count; i++) {
                rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
                                              &filters->filters[i], drv_only);
                if (rc)
                        break;
        }

        /* Rollback if needed */
        if (i != filters->count) {
                BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
                          i, filters->count);
                while (--i >= 0) {
                        if (!filters->filters[i].applied)
                                continue;
                        filters->filters[i].add = !filters->filters[i].add;
                        bnx2x_vf_mac_vlan_config(bp, vf, qid,
                                                 &filters->filters[i],
                                                 drv_only);
                }
        }

        /* It's our responsibility to free the filters */
        kfree(filters);

        return rc;
}
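
/* For reference: the loop above is rollback oriented - if filter i fails,
 * every earlier filter that was actually applied (filter->applied) is
 * replayed with its add/del sense inverted, restoring the classification
 * state found on entry. Note that the filters array is freed here on both
 * the success and the failure paths.
 */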

int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
                         struct bnx2x_vf_queue_construct_params *qctor)
{
        int rc;

        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

        rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
        if (rc)
                goto op_err;

        /* Schedule the configuration of any pending vlan filters */
        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
                               BNX2X_MSG_IOV);
        return 0;
op_err:
        BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
        return rc;
}

static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
                               int qid)
{
        int rc;

        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

        /* If needed, clean the filtering database */
        if ((qid == LEADING_IDX) &&
            bnx2x_validate_vf_sp_objs(bp, vf, false)) {
                rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
                                             BNX2X_VF_FILTER_VLAN_MAC);
                if (rc)
                        goto op_err;
                rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
                                             BNX2X_VF_FILTER_VLAN);
                if (rc)
                        goto op_err;
                rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
                                             BNX2X_VF_FILTER_MAC);
                if (rc)
                        goto op_err;
        }

        /* Terminate queue */
        if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
                struct bnx2x_queue_state_params qstate;

                memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
                qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
                qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
                qstate.cmd = BNX2X_Q_CMD_TERMINATE;
                set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
                rc = bnx2x_queue_state_change(bp, &qstate);
                if (rc)
                        goto op_err;
        }

        return 0;
op_err:
        BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
        return rc;
}

int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
                   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
        struct bnx2x_mcast_list_elem *mc = NULL;
        struct bnx2x_mcast_ramrod_params mcast;
        int rc, i;

        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

        /* Prepare Multicast command */
        memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
        mcast.mcast_obj = &vf->mcast_obj;
        if (drv_only)
                set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
        else
                set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
        if (mc_num) {
                mc = kcalloc(mc_num, sizeof(struct bnx2x_mcast_list_elem),
                             GFP_KERNEL);
                if (!mc) {
                        BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
                        return -ENOMEM;
                }
        }

        if (mc_num) {
                INIT_LIST_HEAD(&mcast.mcast_list);
                for (i = 0; i < mc_num; i++) {
                        mc[i].mac = mcasts[i];
                        list_add_tail(&mc[i].link,
                                      &mcast.mcast_list);
                }

                /* add new mcasts */
                mcast.mcast_list_len = mc_num;
                rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
                if (rc)
                        BNX2X_ERR("Failed to set multicasts\n");
        } else {
                /* clear existing mcasts */
                rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
                if (rc)
                        BNX2X_ERR("Failed to remove multicasts\n");
        }

        kfree(mc);

        return rc;
}
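
/* For reference: a non-empty list is programmed with BNX2X_MCAST_CMD_SET,
 * which replaces the VF's previous multicast configuration wholesale, while
 * an empty list maps to BNX2X_MCAST_CMD_DEL and clears it. The temporary
 * bnx2x_mcast_list_elem array only threads the caller's MACs onto the
 * ramrod's list and is freed before returning.
 */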

static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
                                  struct bnx2x_rx_mode_ramrod_params *ramrod,
                                  struct bnx2x_virtf *vf,
                                  unsigned long accept_flags)
{
        struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

        memset(ramrod, 0, sizeof(*ramrod));
        ramrod->cid = vfq->cid;
        ramrod->cl_id = vfq_cl_id(vf, vfq);
        ramrod->rx_mode_obj = &bp->rx_mode_obj;
        ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
        ramrod->rx_accept_flags = accept_flags;
        ramrod->tx_accept_flags = accept_flags;
        ramrod->pstate = &vf->filter_state;
        ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

        set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
        set_bit(RAMROD_RX, &ramrod->ramrod_flags);
        set_bit(RAMROD_TX, &ramrod->ramrod_flags);

        ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
        ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}

int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
                    int qid, unsigned long accept_flags)
{
        struct bnx2x_rx_mode_ramrod_params ramrod;

        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

        bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
        set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
        vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
        return bnx2x_config_rx_mode(bp, &ramrod);
}

int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
        int rc;

        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

        /* Remove all classification configuration for leading queue */
        if (qid == LEADING_IDX) {
                rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
                if (rc)
                        goto op_err;

                /* Remove filtering if feasible */
                if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
                                                     false,
                                                     BNX2X_VF_FILTER_VLAN_MAC);
                        if (rc)
                                goto op_err;
                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
                                                     false,
                                                     BNX2X_VF_FILTER_VLAN);
                        if (rc)
                                goto op_err;
                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
                                                     false,
                                                     BNX2X_VF_FILTER_MAC);
                        if (rc)
                                goto op_err;
                        rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
                        if (rc)
                                goto op_err;
                }
        }

        /* Destroy queue */
        rc = bnx2x_vf_queue_destroy(bp, vf, qid);
        if (rc)
                goto op_err;
        return rc;
op_err:
        BNX2X_ERR("vf[%d:%d] error: rc %d\n",
                  vf->abs_vfid, qid, rc);
        return rc;
}

/* VF enable primitives:
 * when pretend is required, the caller is responsible for calling pretend
 * prior to calling these routines
 */

/* internal vf enable - until the vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
        REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
        REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
        u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
        u32 was_err_reg = 0;

        switch (was_err_group) {
        case 0:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
                break;
        case 1:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
                break;
        case 2:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
                break;
        case 3:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
                break;
        }
        REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
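
/* Illustrative arithmetic for the lookup above: the 'was error' latches
 * live in four 32-bit clear registers, one per group of 32 VF ids on the
 * path. For example, on path 1 with abs_vfid 35: (2 * 1 + 35) >> 5 = 1
 * selects PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR, and 1 << (35 & 0x1f) writes
 * bit 3 to clear that VF's latch.
 */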

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        int i;
        u32 val;

        /* Set VF masks and configuration - pretend */
        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
        REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
        REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
        REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
        REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
        val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
        val &= ~IGU_VF_CONF_PARENT_MASK;
        val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

        DP(BNX2X_MSG_IOV,
           "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
           vf->abs_vfid, val);

        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

        /* iterate over all queues, clear sb consumer */
        for (i = 0; i < vf_sb_count(vf); i++) {
                u8 igu_sb_id = vf_igu_sb(vf, i);

                /* zero prod memory */
                REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

                /* clear sb state machine */
                bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
                                       false /* VF */);

                /* disable + update */
                bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
                                    IGU_INT_DISABLE, 1);
        }
}
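
/* For reference: everything between the two bnx2x_pretend_func() calls runs
 * with the PF pretending to be the VF, so the mask/PBA writes and the
 * IGU_REG_VF_CONFIGURATION update (function enable, MSI-X enable, parent PF
 * encoding) land in the VF's register view. Afterwards, each of the VF's
 * status blocks has its producer memory zeroed and its state machine
 * cleared while interrupts remain disabled.
 */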

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
        /* set the VF-PF association in the FW */
        storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
        storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

        /* clear vf errors */
        bnx2x_vf_semi_clear_err(bp, abs_vfid);
        bnx2x_vf_pglue_clear_err(bp, abs_vfid);

        /* internal vf-enable - pretend */
        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
        DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
        bnx2x_vf_enable_internal(bp, true);
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        /* Reset vf in IGU - interrupts are still disabled */
        bnx2x_vf_igu_reset(bp, vf);

        /* pretend to enable the vf with the PBF */
        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
        REG_WR(bp, PBF_REG_DISABLE_VF, 0);
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
        struct pci_dev *dev;
        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

        if (!vf)
                return false;

        dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
        if (dev)
                return bnx2x_is_pcie_pending(dev);
        return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
        /* Verify no pending pci transactions */
        if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
                BNX2X_ERR("PCIE Transactions still pending\n");

        return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct vf_pf_resc_request *resc = &vf->alloc_resc;

        /* will be set only during VF-ACQUIRE */
        resc->num_rxqs = 0;
        resc->num_txqs = 0;

        resc->num_mac_filters = VF_MAC_CREDIT_CNT;
        resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;

        /* no real limitation */
        resc->num_mc_filters = 0;

        /* num_sbs already set */
        resc->num_sbs = vf->sb_count;
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        /* reset the state variables */
        bnx2x_iov_static_resc(bp, vf);
        vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

        /* DQ usage counter */
        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
        bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
                                        "DQ VF usage counter timed out",
                                        poll_cnt);
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

        /* FW cleanup command - poll for the results */
        if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
                                   poll_cnt))
                BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

        /* verify TX hw is flushed */
        bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        int rc, i;

        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

        /* the cleanup operations are valid if and only if the VF
         * was first acquired.
         */
        for (i = 0; i < vf_rxq_count(vf); i++) {
                rc = bnx2x_vf_queue_flr(bp, vf, i);
                if (rc)
                        goto out;
        }

        /* remove multicasts */
        bnx2x_vf_mcast(bp, vf, NULL, 0, true);

        /* dispatch final cleanup and wait for HW queues to flush */
        bnx2x_vf_flr_clnup_hw(bp, vf);

        /* release VF resources */
        bnx2x_vf_free_resc(bp, vf);

        vf->malicious = false;

        /* re-open the mailbox */
        bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
        return;
out:
        BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
                  vf->abs_vfid, i, rc);
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
        struct bnx2x_virtf *vf;
        int i;

        for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
                /* VF should be RESET & in FLR cleanup states */
                if (bnx2x_vf(bp, i, state) != VF_RESET ||
                    !bnx2x_vf(bp, i, flr_clnup_stage))
                        continue;

                DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
                   i, BNX2X_NR_VIRTFN(bp));

                vf = BP_VF(bp, i);

                /* lock the vf pf channel */
                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

                /* invoke the VF FLR SM */
                bnx2x_vf_flr(bp, vf);

                /* mark the VF to be ACKED and continue */
                vf->flr_clnup_stage = false;
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
        }

        /* Acknowledge the handled VFs.
         * We acknowledge all the VFs for which an FLR was requested, even
         * those we never opened, since the MCP will interrupt us immediately
         * again if we ack only some of the bits, resulting in an endless
         * loop. This can happen for example in KVM, where an 'all ones' FLR
         * request is sometimes given by the hypervisor.
         */
        DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
           bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
        for (i = 0; i < FLRD_VFS_DWORDS; i++)
                SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
                          bp->vfdb->flrd_vfs[i]);

        bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

        /* clear the acked bits - better yet if the MCP implemented
         * write-to-clear semantics
         */
        for (i = 0; i < FLRD_VFS_DWORDS; i++)
                SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}
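
/* For reference: the loop above only services VFs that are both in VF_RESET
 * state and flagged flr_clnup_stage - the combination set by
 * bnx2x_vf_handle_flr_event() below. The subsequent ack deliberately covers
 * every requested bit, including VFs that were never opened; acking only a
 * subset would re-trigger the MCP attention immediately.
 */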

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
        int i;

        /* Read FLR'd VFs */
        for (i = 0; i < FLRD_VFS_DWORDS; i++)
                bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

        DP(BNX2X_MSG_MCP,
           "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
           bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

        for_each_vf(bp, i) {
                struct bnx2x_virtf *vf = BP_VF(bp, i);
                u32 reset = 0;

                if (vf->abs_vfid < 32)
                        reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
                else
                        reset = bp->vfdb->flrd_vfs[1] &
                                (1 << (vf->abs_vfid - 32));

                if (reset) {
                        /* set as reset and ready for cleanup */
                        vf->state = VF_RESET;
                        vf->flr_clnup_stage = true;

                        DP(BNX2X_MSG_IOV,
                           "Initiating Final cleanup for VF %d\n",
                           vf->abs_vfid);
                }
        }

        /* do the FLR cleanup for all marked VFs */
        bnx2x_vf_flr_clnup(bp);
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
        if (!IS_SRIOV(bp))
                return;

        /* Set the DQ such that the CID reflects the abs_vfid */
        REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
        REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

        /* Set the VFs' starting CID. If it's > 0, the preceding CIDs belong
         * to the PF L2 queues
         */
        REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

        /* The VF window size is the log2 of the max number of CIDs per VF */
        REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

        /* The VF doorbell size (0 - *B, 4 - 128B) is set here to match the
         * PF doorbell size, although the two are independent.
         */
        REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

        /* No security checks for now -
         * configure single rule (out of 16) mask = 0x1, value = 0x0,
         * CID range 0 - 0x1ffff
         */
        REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
        REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
        REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
        REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

        /* set the VF doorbell threshold. This threshold represents the amount
         * of doorbells allowed in the main DORQ fifo for a specific VF.
         */
        REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
}
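
/* Rough shape of the resulting doorbell decode (an inference from the
 * values programmed above, not taken from documentation): with VF base 0,
 * CID base BNX2X_FIRST_VF_CID and a window of BNX2X_VF_CID_WND bits, a
 * doorbell from VF n for its local queue q would resolve to roughly
 * CID = BNX2X_FIRST_VF_CID + (n << BNX2X_VF_CID_WND) + q, i.e. all VF CIDs
 * sit in one contiguous range above the PF's L2 CIDs.
 */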

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
        if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
                REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_domain(struct bnx2x *bp, int vfid)
{
        struct pci_dev *dev = bp->pdev;

        return pci_domain_nr(dev->bus);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
        struct pci_dev *dev = bp->pdev;
        struct bnx2x_sriov *iov = &bp->vfdb->sriov;

        return dev->bus->number + ((dev->devfn + iov->offset +
                                    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
        struct pci_dev *dev = bp->pdev;
        struct bnx2x_sriov *iov = &bp->vfdb->sriov;

        return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
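
/* Illustrative SR-IOV routing-ID arithmetic (per the PCIe SR-IOV spec): VF i
 * of a PF has routing ID PF_RID + offset + stride * i; the low 8 bits form
 * the devfn and the carry moves to a higher bus number - exactly what
 * bnx2x_vf_bus() and bnx2x_vf_devfn() compute. E.g. with devfn 0x00,
 * offset 128 and stride 2, VF 3 lands at devfn (128 + 6) & 0xff = 0x86 on
 * the same bus, since (128 + 6) >> 8 = 0.
 */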

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        int i, n;
        struct pci_dev *dev = bp->pdev;
        struct bnx2x_sriov *iov = &bp->vfdb->sriov;

        for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
                u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
                u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

                size /= iov->total;
                vf->bars[n].bar = start + size * vf->abs_vfid;
                vf->bars[n].size = size;
        }
}

static int
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
        int sb_id;
        u32 val;
        u8 fid, current_pf = 0;

        /* IGU in normal mode - read CAM */
        for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
                val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
                if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
                        continue;
                fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
                if (fid & IGU_FID_ENCODE_IS_PF)
                        current_pf = fid & IGU_FID_PF_NUM_MASK;
                else if (current_pf == BP_FUNC(bp))
                        bnx2x_vf_set_igu_info(bp, sb_id,
                                              (fid & IGU_FID_VF_NUM_MASK));
                DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
                   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
                   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
                   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
                   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
        }
        DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
        return BP_VFDB(bp)->vf_sbs_pool;
}
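
/* For reference: the scan above relies on the IGU CAM listing a PF's entry
 * before the entries of that PF's VFs; current_pf tracks the PF fid most
 * recently seen, and VF entries are attributed to this PF (via
 * bnx2x_vf_set_igu_info()) only while current_pf == BP_FUNC(bp). The return
 * value is the total VF SB pool, which the caller treats as fatal when 0.
 */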

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
        if (bp->vfdb) {
                kfree(bp->vfdb->vfqs);
                kfree(bp->vfdb->vfs);
                kfree(bp->vfdb);
        }
        bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
        int pos;
        struct pci_dev *dev = bp->pdev;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                BNX2X_ERR("failed to find SRIOV capability in device\n");
                return -ENODEV;
        }

        iov->pos = pos;
        DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
        pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
        pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
        pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
        pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
        pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
        pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
        u32 val;

        /* read the SRIOV capability structure
         * The fields can be read via configuration read or
         * directly from the device (starting at offset PCICFG_OFFSET)
         */
        if (bnx2x_sriov_pci_cfg_info(bp, iov))
                return -ENODEV;

        /* get the number of SRIOV bars */
        iov->nres = 0;

        /* read the first_vfid */
        val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
        iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
                               * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

        DP(BNX2X_MSG_IOV,
           "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
           BP_FUNC(bp),
           iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
           iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

        return 0;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
                       int num_vfs_param)
{
        int err, i;
        struct bnx2x_sriov *iov;
        struct pci_dev *dev = bp->pdev;

        bp->vfdb = NULL;

        /* verify this is a PF */
        if (IS_VF(bp))
                return 0;

        /* verify sriov capability is present in configuration space */
        if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
                return 0;

        /* verify chip revision */
        if (CHIP_IS_E1x(bp))
                return 0;

        /* check if SRIOV support is turned off */
        if (!num_vfs_param)
                return 0;

        /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
        if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
                BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
                          BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
                return 0;
        }

        /* SRIOV can be enabled only with MSIX */
        if (int_mode_param == BNX2X_INT_MODE_MSI ||
            int_mode_param == BNX2X_INT_MODE_INTX) {
                BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
                return 0;
        }

        /* verify ari is enabled */
        if (!pci_ari_enabled(bp->pdev->bus)) {
                BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV cannot be enabled\n");
                return 0;
        }

        /* verify igu is in normal mode */
        if (CHIP_INT_MODE_IS_BC(bp)) {
                BNX2X_ERR("IGU not in normal mode, SRIOV cannot be enabled\n");
                return 0;
        }

        /* allocate the vfs database */
        bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
        if (!bp->vfdb) {
                BNX2X_ERR("failed to allocate vf database\n");
                err = -ENOMEM;
                goto failed;
        }

        /* get the sriov info - Linux already collected all the pertinent
         * information, however the sriov structure is for the private use
         * of the pci module. Also we want this information regardless
         * of the hyper-visor.
         */
        iov = &(bp->vfdb->sriov);
        err = bnx2x_sriov_info(bp, iov);
        if (err)
                goto failed;

        /* SR-IOV capability was enabled but there are no VFs */
        if (iov->total == 0) {
                err = -EINVAL;
                goto failed;
        }

        iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

        DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
           num_vfs_param, iov->nr_virtfn);

        /* allocate the vf array */
        bp->vfdb->vfs = kcalloc(BNX2X_NR_VIRTFN(bp),
                                sizeof(struct bnx2x_virtf),
                                GFP_KERNEL);
        if (!bp->vfdb->vfs) {
                BNX2X_ERR("failed to allocate vf array\n");
                err = -ENOMEM;
                goto failed;
        }

        /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
        for_each_vf(bp, i) {
                bnx2x_vf(bp, i, index) = i;
                bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
                bnx2x_vf(bp, i, state) = VF_FREE;
                mutex_init(&bnx2x_vf(bp, i, op_mutex));
                bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
                /* enable spoofchk by default */
                bnx2x_vf(bp, i, spoofchk) = 1;
        }

        /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
        if (!bnx2x_get_vf_igu_cam_info(bp)) {
                BNX2X_ERR("No entries in IGU CAM for vfs\n");
                err = -EINVAL;
                goto failed;
        }

        /* allocate the queue arrays for all VFs */
        bp->vfdb->vfqs = kcalloc(BNX2X_MAX_NUM_VF_QUEUES,
                                 sizeof(struct bnx2x_vf_queue),
                                 GFP_KERNEL);

        if (!bp->vfdb->vfqs) {
                BNX2X_ERR("failed to allocate vf queue array\n");
                err = -ENOMEM;
                goto failed;
        }

        /* Prepare the VFs event synchronization mechanism */
        mutex_init(&bp->vfdb->event_mutex);

        mutex_init(&bp->vfdb->bulletin_mutex);

        if (SHMEM2_HAS(bp, sriov_switch_mode))
                SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);

        return 0;
failed:
        DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
        __bnx2x_iov_free_vfdb(bp);
        return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
        int vf_idx;

        /* if SRIOV is not enabled there's nothing to do */
        if (!IS_SRIOV(bp))
                return;

        bnx2x_disable_sriov(bp);

        /* disable access to all VFs */
        for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
                bnx2x_pretend_func(bp,
                                   HW_VF_HANDLE(bp,
                                                bp->vfdb->sriov.first_vf_in_pf +
                                                vf_idx));
                DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
                   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
                bnx2x_vf_enable_internal(bp, 0);
                bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
        }

        /* free vf database */
        __bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
        int i;

        if (!IS_SRIOV(bp))
                return;

        /* free vfs hw contexts */
        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
                struct hw_dma *cxt = &bp->vfdb->context[i];

                BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
        }

        BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
                       BP_VFDB(bp)->sp_dma.mapping,
                       BP_VFDB(bp)->sp_dma.size);

        BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
                       BP_VF_MBX_DMA(bp)->mapping,
                       BP_VF_MBX_DMA(bp)->size);

        BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
                       BP_VF_BULLETIN_DMA(bp)->mapping,
                       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
        size_t tot_size;
        int i, rc = 0;

        if (!IS_SRIOV(bp))
                return rc;

        /* allocate vfs hw contexts */
        tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
                BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
                struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);

                cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

                if (cxt->size) {
                        cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
                        if (!cxt->addr)
                                goto alloc_mem_err;
                } else {
                        cxt->addr = NULL;
                        cxt->mapping = 0;
                }
                tot_size -= cxt->size;
        }

        /* allocate vfs ramrods dma memory - client_init and set_mac */
        tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
        BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
                                                   tot_size);
        if (!BP_VFDB(bp)->sp_dma.addr)
                goto alloc_mem_err;
        BP_VFDB(bp)->sp_dma.size = tot_size;

        /* allocate mailboxes */
        tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
        BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
                                                  tot_size);
        if (!BP_VF_MBX_DMA(bp)->addr)
                goto alloc_mem_err;

        BP_VF_MBX_DMA(bp)->size = tot_size;

        /* allocate local bulletin boards */
        tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
        BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
                                                       tot_size);
        if (!BP_VF_BULLETIN_DMA(bp)->addr)
                goto alloc_mem_err;

        BP_VF_BULLETIN_DMA(bp)->size = tot_size;

        return 0;

alloc_mem_err:
        return -ENOMEM;
}
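
/* For reference: four DMA areas are sized per PF - ILT-backed CDU context
 * pages (split into CDU_ILT_PAGE_SZ chunks), one bnx2x_vf_sp slow-path
 * scratch area per VF, one MBX_MSG_ALIGNED_SIZE mailbox per VF and one
 * BULLETIN_CONTENT_SIZE bulletin board per VF. bnx2x_iov_free_mem() above
 * releases the same four groups, so the two routines must stay in sync.
 */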

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                           struct bnx2x_vf_queue *q)
{
        u8 cl_id = vfq_cl_id(vf, q);
        u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
        unsigned long q_type = 0;

        set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
        set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

        /* Queue State object */
        bnx2x_init_queue_obj(bp, &q->sp_obj,
                             cl_id, &q->cid, 1, func_id,
                             bnx2x_vf_sp(bp, vf, q_data),
                             bnx2x_vf_sp_map(bp, vf, q_data),
                             q_type);

        /* sp indication is set only when vlan/mac/etc. are initialized */
        q->sp_initialized = false;

        DP(BNX2X_MSG_IOV,
           "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
           vf->abs_vfid, q->sp_obj.func_id, q->cid);
}

static int bnx2x_max_speed_cap(struct bnx2x *bp)
{
        u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];

        if (supported &
            (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
                return 20000;

        return 10000; /* assume lowest supported speed is 10G */
}

int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
{
        struct bnx2x_link_report_data *state = &bp->last_reported_link;
        struct pf_vf_bulletin_content *bulletin;
        struct bnx2x_virtf *vf;
        bool update = true;
        int rc = 0;

        /* sanity and init */
        rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
        if (rc)
                return rc;

        mutex_lock(&bp->vfdb->bulletin_mutex);

        if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
                bulletin->valid_bitmap |= 1 << LINK_VALID;

                bulletin->link_speed = state->line_speed;
                bulletin->link_flags = 0;
                if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                             &state->link_report_flags))
                        bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
                if (test_bit(BNX2X_LINK_REPORT_FD,
                             &state->link_report_flags))
                        bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
                if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
                             &state->link_report_flags))
                        bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
                             &state->link_report_flags))
                        bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
        } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
                   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
                bulletin->valid_bitmap |= 1 << LINK_VALID;
                bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
        } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
                   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
                bulletin->valid_bitmap |= 1 << LINK_VALID;
                bulletin->link_speed = bnx2x_max_speed_cap(bp);
                bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
        } else {
                update = false;
        }

        if (update) {
                DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
                   "vf %d mode %u speed %d flags %x\n", idx,
                   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);

                /* Post update on VF's bulletin board */
                rc = bnx2x_post_vf_bulletin(bp, idx);
                if (rc) {
                        BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
                        goto out;
                }
        }

out:
        mutex_unlock(&bp->vfdb->bulletin_mutex);
        return rc;
}
1498}
1499
1500int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
1501{
1502        struct bnx2x *bp = netdev_priv(dev);
1503        struct bnx2x_virtf *vf = BP_VF(bp, idx);
1504
1505        if (!vf)
1506                return -EINVAL;
1507
1508        if (vf->link_cfg == link_state)
1509                return 0; /* nothing to do */
1510
1511        vf->link_cfg = link_state;
1512
1513        return bnx2x_iov_link_update_vf(bp, idx);
1514}
1515
1516void bnx2x_iov_link_update(struct bnx2x *bp)
1517{
1518        int vfid;
1519
1520        if (!IS_SRIOV(bp))
1521                return;
1522
1523        for_each_vf(bp, vfid)
1524                bnx2x_iov_link_update_vf(bp, vfid);
1525}
1526
1527/* called by bnx2x_nic_load */
1528int bnx2x_iov_nic_init(struct bnx2x *bp)
1529{
1530        int vfid;
1531
1532        if (!IS_SRIOV(bp)) {
1533                DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1534                return 0;
1535        }
1536
1537        DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
1538
1539        /* let FLR complete ... */
1540        msleep(100);
1541
1542        /* initialize vf database */
1543        for_each_vf(bp, vfid) {
1544                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1545
1546                int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1547                        BNX2X_CIDS_PER_VF;
1548
1549                union cdu_context *base_cxt = (union cdu_context *)
1550                        BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1551                        (base_vf_cid & (ILT_PAGE_CIDS-1));
1552
1553                DP(BNX2X_MSG_IOV,
1554                   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1555                   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1556                   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1557
1558                /* init statically provisioned resources */
1559                bnx2x_iov_static_resc(bp, vf);
1560
1561                /* queues are initialized during VF-ACQUIRE */
1562                vf->filter_state = 0;
1563                vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1564
1565                bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
1566                                       vf_vlan_rules_cnt(vf));
1567                bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
1568                                       vf_mac_rules_cnt(vf));
1569
1570                /*  init mcast object - This object will be re-initialized
1571                 *  during VF-ACQUIRE with the proper cl_id and cid.
1572                 *  It needs to be initialized here so that it can be safely
1573                 *  handled by a subsequent FLR flow.
1574                 */
1575                bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1576                                     0xFF, 0xFF, 0xFF,
1577                                     bnx2x_vf_sp(bp, vf, mcast_rdata),
1578                                     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1579                                     BNX2X_FILTER_MCAST_PENDING,
1580                                     &vf->filter_state,
1581                                     BNX2X_OBJ_TYPE_RX_TX);
1582
1583                /* set the mailbox message addresses */
1584                BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1585                        (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1586                        MBX_MSG_ALIGNED_SIZE);
1587
1588                BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1589                        vfid * MBX_MSG_ALIGNED_SIZE;
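                    /* The two views advance in lockstep: VF n's mailbox lives
                     * at offset n * MBX_MSG_ALIGNED_SIZE in both the virtual
                     * and DMA mappings of the single block allocated in
                     * bnx2x_iov_alloc_mem() above.
                     */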
1590
1591                /* Enable vf mailbox */
1592                bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1593        }
1594
1595        /* Final VF init */
1596        for_each_vf(bp, vfid) {
1597                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1598
1599                /* fill in the BDF and bars */
1600                vf->domain = bnx2x_vf_domain(bp, vfid);
1601                vf->bus = bnx2x_vf_bus(bp, vfid);
1602                vf->devfn = bnx2x_vf_devfn(bp, vfid);
1603                bnx2x_vf_set_bars(bp, vf);
1604
1605                DP(BNX2X_MSG_IOV,
1606                   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1607                   vf->abs_vfid, vf->bus, vf->devfn,
1608                   (unsigned)vf->bars[0].bar, vf->bars[0].size,
1609                   (unsigned)vf->bars[1].bar, vf->bars[1].size,
1610                   (unsigned)vf->bars[2].bar, vf->bars[2].size);
1611        }
1612
1613        return 0;
1614}
1615
1616/* called by bnx2x_chip_cleanup */
1617int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
1618{
1619        int i;
1620
1621        if (!IS_SRIOV(bp))
1622                return 0;
1623
1624        /* release all the VFs */
1625        for_each_vf(bp, i)
1626                bnx2x_vf_release(bp, BP_VF(bp, i));
1627
1628        return 0;
1629}
1630
1631/* called by bnx2x_init_hw_func, returns the next ilt line */
1632int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1633{
1634        int i;
1635        struct bnx2x_ilt *ilt = BP_ILT(bp);
1636
1637        if (!IS_SRIOV(bp))
1638                return line;
1639
1640        /* set vfs ilt lines */
1641        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1642                struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1643
1644                ilt->lines[line+i].page = hw_cxt->addr;
1645                ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1646                ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1647        }
1648        return line + i;
1649}
1650
1651static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
1652{
1653        return ((cid >= BNX2X_FIRST_VF_CID) &&
1654                ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1655}
1656
1657static
1658void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1659                                        struct bnx2x_vf_queue *vfq,
1660                                        union event_ring_elem *elem)
1661{
1662        unsigned long ramrod_flags = 0;
1663        int rc = 0;
1664        u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
1665
1666        /* Always push next commands out, don't wait here */
1667        set_bit(RAMROD_CONT, &ramrod_flags);
1668
1669        switch (echo >> BNX2X_SWCID_SHIFT) {
1670        case BNX2X_FILTER_MAC_PENDING:
1671                rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1672                                           &ramrod_flags);
1673                break;
1674        case BNX2X_FILTER_VLAN_PENDING:
1675                rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1676                                            &ramrod_flags);
1677                break;
1678        default:
1679                BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
1680                return;
1681        }
1682        if (rc < 0)
1683                BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
1684        else if (rc > 0)
1685                DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
1686}
1687
1688static
1689void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
1690                               struct bnx2x_virtf *vf)
1691{
1692        struct bnx2x_mcast_ramrod_params rparam = {NULL};
1693        int rc;
1694
1695        rparam.mcast_obj = &vf->mcast_obj;
1696        vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
1697
1698        /* If there are pending mcast commands - send them */
1699        if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
1700                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1701                if (rc < 0)
1702                        BNX2X_ERR("Failed to send pending mcast commands: %d\n",
1703                                  rc);
1704        }
1705}
1706
1707static
1708void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
1709                                 struct bnx2x_virtf *vf)
1710{
1711        smp_mb__before_atomic();
1712        clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1713        smp_mb__after_atomic();
1714}
1715
1716static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
1717                                           struct bnx2x_virtf *vf)
1718{
1719        vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
1720}
1721
1722int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1723{
1724        struct bnx2x_virtf *vf;
1725        int qidx = 0, abs_vfid;
1726        u8 opcode;
1727        u16 cid = 0xffff;
1728
1729        if (!IS_SRIOV(bp))
1730                return 1;
1731
1732        /* first get the cid - the only events we handle here are cfc-delete
1733         * and set-mac completion
1734         */
1735        opcode = elem->message.opcode;
1736
1737        switch (opcode) {
1738        case EVENT_RING_OPCODE_CFC_DEL:
1739                cid = SW_CID(elem->message.data.cfc_del_event.cid);
1740                DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1741                break;
1742        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1743        case EVENT_RING_OPCODE_MULTICAST_RULES:
1744        case EVENT_RING_OPCODE_FILTERS_RULES:
1745        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1746                cid = SW_CID(elem->message.data.eth_event.echo);
1747                DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1748                break;
1749        case EVENT_RING_OPCODE_VF_FLR:
1750                abs_vfid = elem->message.data.vf_flr_event.vf_id;
1751                DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
1752                   abs_vfid);
1753                goto get_vf;
1754        case EVENT_RING_OPCODE_MALICIOUS_VF:
1755                abs_vfid = elem->message.data.malicious_vf_event.vf_id;
1756                BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
1757                          abs_vfid,
1758                          elem->message.data.malicious_vf_event.err_id);
1759                goto get_vf;
1760        default:
1761                return 1;
1762        }
1763
1764        /* check if the cid is in the VF range */
1765        if (!bnx2x_iov_is_vf_cid(bp, cid)) {
1766                DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
1767                return 1;
1768        }
1769
1770        /* extract vf and rxq index from vf_cid - relies on the following:
1771         * 1. vfid on cid reflects the true abs_vfid
1772         * 2. The max number of VFs (per path) is 64
1773         */
1774        qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
1775        abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
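            /* Illustrative decode, assuming a 4-bit CID window (16 CIDs per
             * VF): cid bits [3:0] select the queue within the VF and bits
             * [9:4] the vfid (64 VFs max per path), so bits [3:0] = 5 with
             * bits [9:4] = 2 means queue 5 of abs_vfid 2.
             */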
1776get_vf:
1777        vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1778
1779        if (!vf) {
1780                BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
1781                          cid, abs_vfid);
1782                return 0;
1783        }
1784
1785        switch (opcode) {
1786        case EVENT_RING_OPCODE_CFC_DEL:
1787                DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
1788                   vf->abs_vfid, qidx);
1789                vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
1790                                                       &vfq_get(vf,
1791                                                                qidx)->sp_obj,
1792                                                       BNX2X_Q_CMD_CFC_DEL);
1793                break;
1794        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1795                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
1796                   vf->abs_vfid, qidx);
1797                bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
1798                break;
1799        case EVENT_RING_OPCODE_MULTICAST_RULES:
1800                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
1801                   vf->abs_vfid, qidx);
1802                bnx2x_vf_handle_mcast_eqe(bp, vf);
1803                break;
1804        case EVENT_RING_OPCODE_FILTERS_RULES:
1805                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
1806                   vf->abs_vfid, qidx);
1807                bnx2x_vf_handle_filters_eqe(bp, vf);
1808                break;
1809        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1810                DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
1811                   vf->abs_vfid, qidx);
1812                bnx2x_vf_handle_rss_update_eqe(bp, vf);
1813                fallthrough;
1814        case EVENT_RING_OPCODE_VF_FLR:
1815                /* Do nothing for now */
1816                return 0;
1817        case EVENT_RING_OPCODE_MALICIOUS_VF:
1818                vf->malicious = true;
1819                return 0;
1820        }
1821
1822        return 0;
1823}
1824
1825static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1826{
1827        /* extract the vf from vf_cid - relies on the following:
1828         * 1. vfid on cid reflects the true abs_vfid
1829         * 2. The max number of VFs (per path) is 64
1830         */
1831        int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1832        return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1833}
1834
1835void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1836                                struct bnx2x_queue_sp_obj **q_obj)
1837{
1838        struct bnx2x_virtf *vf;
1839
1840        if (!IS_SRIOV(bp))
1841                return;
1842
1843        vf = bnx2x_vf_by_cid(bp, vf_cid);
1844
1845        if (vf) {
1846                /* extract queue index from vf_cid - relies on the following:
1847                 * 1. vfid on cid reflects the true abs_vfid
1848                 * 2. The max number of VFs (per path) is 64
1849                 */
1850                int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1851                *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1852        } else {
1853                BNX2X_ERR("No vf matching cid %d\n", vf_cid);
1854        }
1855}
1856
1857void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1858{
1859        int i;
1860        int first_queue_query_index, num_queues_req;
1861        dma_addr_t cur_data_offset;
1862        struct stats_query_entry *cur_query_entry;
1863        u8 stats_count = 0;
1864        bool is_fcoe = false;
1865
1866        if (!IS_SRIOV(bp))
1867                return;
1868
1869        if (!NO_FCOE(bp))
1870                is_fcoe = true;
1871
1872        /* fcoe adds one global request and one queue request */
1873        num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
1874        first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
1875                (is_fcoe ? 0 : 1);
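            /* Illustrative layout: with 4 ETH queues and no FCoE the VF
             * entries start at query[BNX2X_FIRST_QUEUE_QUERY_IDX - 1 + 4];
             * enabling FCoE adds one global query (shifting the first queue
             * query one slot later) plus one queue query, pushing the VF
             * entries two slots further out.
             */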
1876
1877        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1878               "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
1879               BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
1880               first_queue_query_index + num_queues_req);
1881
1882        cur_data_offset = bp->fw_stats_data_mapping +
1883                offsetof(struct bnx2x_fw_stats_data, queue_stats) +
1884                num_queues_req * sizeof(struct per_queue_stats);
1885
1886        cur_query_entry = &bp->fw_stats_req->
1887                query[first_queue_query_index + num_queues_req];
1888
1889        for_each_vf(bp, i) {
1890                int j;
1891                struct bnx2x_virtf *vf = BP_VF(bp, i);
1892
1893                if (vf->state != VF_ENABLED) {
1894                        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1895                               "vf %d not enabled so no stats for it\n",
1896                               vf->abs_vfid);
1897                        continue;
1898                }
1899
1900                if (vf->malicious) {
1901                        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1902                               "vf %d malicious so no stats for it\n",
1903                               vf->abs_vfid);
1904                        continue;
1905                }
1906
1907                DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1908                       "add addresses for vf %d\n", vf->abs_vfid);
1909                for_each_vfq(vf, j) {
1910                        struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
1911
1912                        dma_addr_t q_stats_addr =
1913                                vf->fw_stat_map + j * vf->stats_stride;
1914
1915                        /* collect stats from active queues only */
1916                        if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
1917                            BNX2X_Q_LOGICAL_STATE_STOPPED)
1918                                continue;
1919
1920                        /* create stats query entry for this queue */
1921                        cur_query_entry->kind = STATS_TYPE_QUEUE;
1922                        cur_query_entry->index = vfq_stat_id(vf, rxq);
1923                        cur_query_entry->funcID =
1924                                cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
1925                        cur_query_entry->address.hi =
1926                                cpu_to_le32(U64_HI(q_stats_addr));
1927                        cur_query_entry->address.lo =
1928                                cpu_to_le32(U64_LO(q_stats_addr));
1929                        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1930                               "added address %x %x for vf %d queue %d client %d\n",
1931                               cur_query_entry->address.hi,
1932                               cur_query_entry->address.lo,
1933                               cur_query_entry->funcID,
1934                               j, cur_query_entry->index);
1935                        cur_query_entry++;
1936                        cur_data_offset += sizeof(struct per_queue_stats);
1937                        stats_count++;
1938
1939                        /* all stats are coalesced to the leading queue */
1940                        if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
1941                                break;
1942                }
1943        }
1944        bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1945}
1946
1947/* VF API helpers */
1948static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
1949                                u8 enable)
1950{
1951        u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
1952        u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
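            /* Each entry pairs the 6-bit vfid (up to 64 VFs per path) with
             * what appears to be a valid/enable flag in bit 6; disabling a
             * queue simply zeroes its entry.
             */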
1953
1954        REG_WR(bp, reg, val);
1955}
1956
1957static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
1958{
1959        int i;
1960
1961        for_each_vfq(vf, i)
1962                bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
1963                                    vfq_qzone_id(vf, vfq_get(vf, i)), false);
1964}
1965
1966static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
1967{
1968        u32 val;
1969
1970        /* clear the VF configuration - pretend */
1971        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1972        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1973        val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
1974                 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
1975        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1976        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1977}
1978
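    /* A VF can run no more queues than it has status blocks or CIDs, subject
     * to the global BNX2X_VF_MAX_QUEUES cap. Illustrative numbers: a VF
     * holding 8 SBs with 16 CIDs per VF and a cap of 16 gets at most 8
     * queues.
     */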
1979u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
1980{
1981        return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
1982                     BNX2X_VF_MAX_QUEUES);
1983}
1984
1985static
1986int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
1987                            struct vf_pf_resc_request *req_resc)
1988{
1989        u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1990        u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1991
1992        return ((req_resc->num_rxqs <= rxq_cnt) &&
1993                (req_resc->num_txqs <= txq_cnt) &&
1994                (req_resc->num_sbs <= vf_sb_count(vf)) &&
1995                (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
1996                (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
1997}
1998
1999/* CORE VF API */
2000int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2001                     struct vf_pf_resc_request *resc)
2002{
2003        int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2004                BNX2X_CIDS_PER_VF;
2005
2006        union cdu_context *base_cxt = (union cdu_context *)
2007                BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2008                (base_vf_cid & (ILT_PAGE_CIDS-1));
2009        int i;
2010
2011        /* if state is 'acquired' the VF was not released or FLR'd, in
2012         * which case the returned resources match the already acquired
2013         * resources. Verify that the requested numbers do not exceed the
2014         * already acquired numbers.
2015         */
2016        if (vf->state == VF_ACQUIRED) {
2017                DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2018                   vf->abs_vfid);
2019
2020                if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2021                        BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2022                                  vf->abs_vfid);
2023                        return -EINVAL;
2024                }
2025                return 0;
2026        }
2027
2028        /* Otherwise vf state must be 'free' or 'reset' */
2029        if (vf->state != VF_FREE && vf->state != VF_RESET) {
2030                BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2031                          vf->abs_vfid, vf->state);
2032                return -EINVAL;
2033        }
2034
2035        /* static allocation:
2036         * the global maximum numbers are fixed per VF. Fail the request if
2037         * the requested numbers exceed these globals
2038         */
2039        if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2040                DP(BNX2X_MSG_IOV,
2041                   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2042                /* set the max resource in the vf */
2043                return -ENOMEM;
2044        }
2045
2046        /* Set resources counters - 0 request means max available */
2047        vf_sb_count(vf) = resc->num_sbs;
2048        vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2049        vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2050
2051        DP(BNX2X_MSG_IOV,
2052           "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2053           vf_sb_count(vf), vf_rxq_count(vf),
2054           vf_txq_count(vf), vf_mac_rules_cnt(vf),
2055           vf_vlan_rules_cnt(vf));
2056
2057        /* Initialize the queues */
2058        if (!vf->vfqs) {
2059                DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2060                return -EINVAL;
2061        }
2062
2063        for_each_vfq(vf, i) {
2064                struct bnx2x_vf_queue *q = vfq_get(vf, i);
2065
2066                if (!q) {
2067                        BNX2X_ERR("q number %d was not allocated\n", i);
2068                        return -EINVAL;
2069                }
2070
2071                q->index = i;
2072                q->cxt = &((base_cxt + i)->eth);
2073                q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2074
2075                DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2076                   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2077
2078                /* init SP objects */
2079                bnx2x_vfq_init(bp, vf, q);
2080        }
2081        vf->state = VF_ACQUIRED;
2082        return 0;
2083}
2084
2085int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2086{
2087        struct bnx2x_func_init_params func_init = {0};
2088        int i;
2089
2090        /* the sb resources are initialized at this point, do the
2091         * FW/HW initializations
2092         */
2093        for_each_vf_sb(vf, i)
2094                bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2095                              vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2096
2097        /* Sanity checks */
2098        if (vf->state != VF_ACQUIRED) {
2099                DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2100                   vf->abs_vfid, vf->state);
2101                return -EINVAL;
2102        }
2103
2104        /* let FLR complete ... */
2105        msleep(100);
2106
2107        /* FLR cleanup epilogue */
2108        if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2109                return -EBUSY;
2110
2111        /* reset IGU VF statistics: MSIX */
2112        REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2113
2114        /* function setup */
2115        func_init.pf_id = BP_FUNC(bp);
2116        func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2117        bnx2x_func_init(bp, &func_init);
2118
2119        /* Enable the vf */
2120        bnx2x_vf_enable_access(bp, vf->abs_vfid);
2121        bnx2x_vf_enable_traffic(bp, vf);
2122
2123        /* queue protection table */
2124        for_each_vfq(vf, i)
2125                bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2126                                    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2127
2128        vf->state = VF_ENABLED;
2129
2130        /* update vf bulletin board */
2131        bnx2x_post_vf_bulletin(bp, vf->index);
2132
2133        return 0;
2134}
2135
2136struct set_vf_state_cookie {
2137        struct bnx2x_virtf *vf;
2138        u8 state;
2139};
2140
2141static void bnx2x_set_vf_state(void *cookie)
2142{
2143        struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2144
2145        p->vf->state = p->state;
2146}
2147
2148int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2149{
2150        int rc = 0, i;
2151
2152        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2153
2154        /* Close all queues */
2155        for (i = 0; i < vf_rxq_count(vf); i++) {
2156                rc = bnx2x_vf_queue_teardown(bp, vf, i);
2157                if (rc)
2158                        goto op_err;
2159        }
2160
2161        /* disable the interrupts */
2162        DP(BNX2X_MSG_IOV, "disabling igu\n");
2163        bnx2x_vf_igu_disable(bp, vf);
2164
2165        /* disable the VF */
2166        DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2167        bnx2x_vf_clr_qtbl(bp, vf);
2168
2169        /* need to make sure there are no outstanding stats ramrods which may
2170         * cause the device to access the VF's stats buffer, which is freed
2171         * as soon as we return from the close flow.
2172         */
2173        {
2174                struct set_vf_state_cookie cookie;
2175
2176                cookie.vf = vf;
2177                cookie.state = VF_ACQUIRED;
2178                rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2179                if (rc)
2180                        goto op_err;
2181        }
2182
2183        DP(BNX2X_MSG_IOV, "set state to acquired\n");
2184
2185        return 0;
2186op_err:
2187        BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
2188        return rc;
2189}
2190
2191/* VF release can be called either when: 1. The VF was acquired but
2192 * not enabled, or 2. The VF was enabled or in the process of being
2193 * enabled.
2194 */
2195int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
2196{
2197        int rc;
2198
2199        DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2200           vf->state == VF_FREE ? "Free" :
2201           vf->state == VF_ACQUIRED ? "Acquired" :
2202           vf->state == VF_ENABLED ? "Enabled" :
2203           vf->state == VF_RESET ? "Reset" :
2204           "Unknown");
2205
2206        switch (vf->state) {
2207        case VF_ENABLED:
2208                rc = bnx2x_vf_close(bp, vf);
2209                if (rc)
2210                        goto op_err;
2211                fallthrough;    /* to release resources */
2212        case VF_ACQUIRED:
2213                DP(BNX2X_MSG_IOV, "about to free resources\n");
2214                bnx2x_vf_free_resc(bp, vf);
2215                break;
2216
2217        case VF_FREE:
2218        case VF_RESET:
2219        default:
2220                break;
2221        }
2222        return 0;
2223op_err:
2224        BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
2225        return rc;
2226}
2227
2228int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2229                        struct bnx2x_config_rss_params *rss)
2230{
2231        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2232        set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
2233        return bnx2x_config_rss(bp, rss);
2234}
2235
2236int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2237                        struct vfpf_tpa_tlv *tlv,
2238                        struct bnx2x_queue_update_tpa_params *params)
2239{
2240        aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
2241        struct bnx2x_queue_state_params qstate;
2242        int qid, rc = 0;
2243
2244        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2245
2246        /* Set ramrod params */
2247        memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
2248        memcpy(&qstate.params.update_tpa, params,
2249               sizeof(struct bnx2x_queue_update_tpa_params));
2250        qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
2251        set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
2252
2253        for (qid = 0; qid < vf_rxq_count(vf); qid++) {
2254                qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
2255                qstate.params.update_tpa.sge_map = sge_addr[qid];
2256                DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
2257                   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
2258                   U64_LO(sge_addr[qid]));
2259                rc = bnx2x_queue_state_change(bp, &qstate);
2260                if (rc) {
2261                        BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
2262                                  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
2263                                  vf->abs_vfid, qid);
2264                        return rc;
2265                }
2266        }
2267
2268        return rc;
2269}
2270
2271/* VF release ~ VF close + VF release-resources
2272 * Release is the ultimate SW shutdown and is called whenever an
2273 * irrecoverable error is encountered.
2274 */
2275int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2276{
2277        int rc;
2278
2279        DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
2280        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2281
2282        rc = bnx2x_vf_free(bp, vf);
2283        if (rc)
2284                WARN(rc,
2285                     "VF[%d] Failed to release resources - rc=%d\n",
2286                     vf->abs_vfid, rc);
2287        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2288        return rc;
2289}
2290
2291void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2292                              enum channel_tlvs tlv)
2293{
2294        /* we don't lock the channel for unsupported tlvs */
2295        if (!bnx2x_tlv_supported(tlv)) {
2296                BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
2297                return;
2298        }
2299
2300        /* lock the channel */
2301        mutex_lock(&vf->op_mutex);
2302
2303        /* record the locking op */
2304        vf->op_current = tlv;
2305
2306        /* log the lock */
2307        DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2308           vf->abs_vfid, tlv);
2309}
2310
2311void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2312                                enum channel_tlvs expected_tlv)
2313{
2314        enum channel_tlvs current_tlv;
2315
2316        if (!vf) {
2317                BNX2X_ERR("VF was %p\n", vf);
2318                return;
2319        }
2320
2321        current_tlv = vf->op_current;
2322
2323        /* we don't unlock the channel for unsupported tlvs */
2324        if (!bnx2x_tlv_supported(expected_tlv))
2325                return;
2326
2327        WARN(expected_tlv != vf->op_current,
2328             "lock mismatch: expected %d found %d", expected_tlv,
2329             vf->op_current);
2330
2331        /* clear the locking op */
2332        vf->op_current = CHANNEL_TLV_NONE;
2333
2334        /* unlock the channel */
2335        mutex_unlock(&vf->op_mutex);
2336
2337        /* log the unlock */
2338        DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2339           vf->abs_vfid, current_tlv);
2340}
2341
2342static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
2343{
2344        struct bnx2x_queue_state_params q_params;
2345        u32 prev_flags;
2346        int i, rc;
2347
2348        /* Verify changes are needed and record current Tx switching state */
2349        prev_flags = bp->flags;
2350        if (enable)
2351                bp->flags |= TX_SWITCHING;
2352        else
2353                bp->flags &= ~TX_SWITCHING;
2354        if (prev_flags == bp->flags)
2355                return 0;
2356
2357        /* Verify state enables the sending of queue ramrods */
2358        if ((bp->state != BNX2X_STATE_OPEN) ||
2359            (bnx2x_get_q_logical_state(bp,
2360                                      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
2361             BNX2X_Q_LOGICAL_STATE_ACTIVE))
2362                return 0;
2363
2364        /* send q. update ramrod to configure Tx switching */
2365        memset(&q_params, 0, sizeof(q_params));
2366        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2367        q_params.cmd = BNX2X_Q_CMD_UPDATE;
2368        __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
2369                  &q_params.params.update.update_flags);
2370        if (enable)
2371                __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2372                          &q_params.params.update.update_flags);
2373        else
2374                __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2375                            &q_params.params.update.update_flags);
2376
2377        /* send the ramrod on all the queues of the PF */
2378        for_each_eth_queue(bp, i) {
2379                struct bnx2x_fastpath *fp = &bp->fp[i];
2380                int tx_idx;
2381
2382                /* Set the appropriate Queue object */
2383                q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
2384
2385                for (tx_idx = FIRST_TX_COS_INDEX;
2386                     tx_idx < fp->max_cos; tx_idx++) {
2387                        q_params.params.update.cid_index = tx_idx;
2388
2389                        /* Update the Queue state */
2390                        rc = bnx2x_queue_state_change(bp, &q_params);
2391                        if (rc) {
2392                                BNX2X_ERR("Failed to configure Tx switching\n");
2393                                return rc;
2394                        }
2395                }
2396        }
2397
2398        DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
2399        return 0;
2400}
2401
2402int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
2403{
2404        struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
2405
2406        if (!IS_SRIOV(bp)) {
2407                BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
2408                return -EINVAL;
2409        }
2410
2411        DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
2412           num_vfs_param, BNX2X_NR_VIRTFN(bp));
2413
2414        /* HW channel is only operational when PF is up */
2415        if (bp->state != BNX2X_STATE_OPEN) {
2416                BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
2417                return -EINVAL;
2418        }
2419
2420        /* we are always bound by the total_vfs in the configuration space */
2421        if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
2422                BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
2423                          num_vfs_param, BNX2X_NR_VIRTFN(bp));
2424                num_vfs_param = BNX2X_NR_VIRTFN(bp);
2425        }
2426
2427        bp->requested_nr_virtfn = num_vfs_param;
2428        if (num_vfs_param == 0) {
2429                bnx2x_set_pf_tx_switching(bp, false);
2430                bnx2x_disable_sriov(bp);
2431                return 0;
2432        } else {
2433                return bnx2x_enable_sriov(bp);
2434        }
2435}
2436
2437#define IGU_ENTRY_SIZE 4
2438
2439int bnx2x_enable_sriov(struct bnx2x *bp)
2440{
2441        int rc = 0, req_vfs = bp->requested_nr_virtfn;
2442        int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
2443        u32 igu_entry, address;
2444        u16 num_vf_queues;
2445
2446        if (req_vfs == 0)
2447                return 0;
2448
2449        first_vf = bp->vfdb->sriov.first_vf_in_pf;
2450
2451        /* statically distribute vf sb pool between VFs */
2452        num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
2453                              BP_VFDB(bp)->vf_sbs_pool / req_vfs);
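            /* Illustrative split: a pool of 34 SBs shared by 4 requested VFs
             * yields 34 / 4 = 8 vectors per VF (integer division), clamped to
             * BNX2X_VF_MAX_QUEUES; the 2 leftover SBs stay unused.
             */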
2454
2455        /* zero previous values learned from igu cam */
2456        for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
2457                struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2458
2459                vf->sb_count = 0;
2460                vf_sb_count(BP_VF(bp, vf_idx)) = 0;
2461        }
2462        bp->vfdb->vf_sbs_pool = 0;
2463
2464        /* prepare IGU cam */
2465        sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
2466        address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
2467        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2468                for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
2469                        igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
2470                                vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
2471                                IGU_REG_MAPPING_MEMORY_VALID;
2472                        DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
2473                           sb_idx, vf_idx);
2474                        REG_WR(bp, address, igu_entry);
2475                        sb_idx++;
2476                        address += IGU_ENTRY_SIZE;
2477                }
2478        }
2479
2480        /* Reinitialize vf database according to igu cam */
2481        bnx2x_get_vf_igu_cam_info(bp);
2482
2483        DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
2484           BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
2485
2486        qcount = 0;
2487        for_each_vf(bp, vf_idx) {
2488                struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2489
2490                /* set local queue arrays */
2491                vf->vfqs = &bp->vfdb->vfqs[qcount];
2492                qcount += vf_sb_count(vf);
2493                bnx2x_iov_static_resc(bp, vf);
2494        }
2495
2496        /* prepare msix vectors in VF configuration space - the value in the
2497         * PCI configuration space should be the index of the last entry,
2498         * namely one less than the actual size of the table
2499         */
2500        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2501                bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
2502                REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
2503                       num_vf_queues - 1);
2504                DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
2505                   vf_idx, num_vf_queues - 1);
2506        }
2507        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2508
2509        /* enable sriov. This will probe all the VFs, and consequentially cause
2510         * the "acquire" messages to appear on the VF PF channel.
2511         */
2512        DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
2513        bnx2x_disable_sriov(bp);
2514
2515        rc = bnx2x_set_pf_tx_switching(bp, true);
2516        if (rc)
2517                return rc;
2518
2519        rc = pci_enable_sriov(bp->pdev, req_vfs);
2520        if (rc) {
2521                BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
2522                return rc;
2523        }
2524        DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
2525        return req_vfs;
2526}
2527
2528void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
2529{
2530        int vfidx;
2531        struct pf_vf_bulletin_content *bulletin;
2532
2533        DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
2534        for_each_vf(bp, vfidx) {
2535                bulletin = BP_VF_BULLETIN(bp, vfidx);
2536                if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2537                        bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
2538                                          htons(ETH_P_8021Q));
2539        }
2540}
2541
2542void bnx2x_disable_sriov(struct bnx2x *bp)
2543{
2544        if (pci_vfs_assigned(bp->pdev)) {
2545                DP(BNX2X_MSG_IOV,
2546                   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2547                return;
2548        }
2549
2550        pci_disable_sriov(bp->pdev);
2551}
2552
2553static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
2554                            struct bnx2x_virtf **vf,
2555                            struct pf_vf_bulletin_content **bulletin,
2556                            bool test_queue)
2557{
2558        if (bp->state != BNX2X_STATE_OPEN) {
2559                BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
2560                return -EINVAL;
2561        }
2562
2563        if (!IS_SRIOV(bp)) {
2564                BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
2565                return -EINVAL;
2566        }
2567
2568        if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
2569                BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
2570                          vfidx, BNX2X_NR_VIRTFN(bp));
2571                return -EINVAL;
2572        }
2573
2574        /* init members */
2575        *vf = BP_VF(bp, vfidx);
2576        *bulletin = BP_VF_BULLETIN(bp, vfidx);
2577
2578        if (!*vf) {
2579                BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
2580                return -EINVAL;
2581        }
2582
2583        if (test_queue && !(*vf)->vfqs) {
2584                BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
2585                          vfidx);
2586                return -EINVAL;
2587        }
2588
2589        if (!*bulletin) {
2590                BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
2591                          vfidx);
2592                return -EINVAL;
2593        }
2594
2595        return 0;
2596}
2597
2598int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2599                        struct ifla_vf_info *ivi)
2600{
2601        struct bnx2x *bp = netdev_priv(dev);
2602        struct bnx2x_virtf *vf = NULL;
2603        struct pf_vf_bulletin_content *bulletin = NULL;
2604        struct bnx2x_vlan_mac_obj *mac_obj;
2605        struct bnx2x_vlan_mac_obj *vlan_obj;
2606        int rc;
2607
2608        /* sanity and init */
2609        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2610        if (rc)
2611                return rc;
2612
2613        mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2614        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2615        if (!mac_obj || !vlan_obj) {
2616                BNX2X_ERR("VF partially initialized\n");
2617                return -EINVAL;
2618        }
2619
2620        ivi->vf = vfidx;
2621        ivi->qos = 0;
2622        ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2623        ivi->min_tx_rate = 0;
2624        ivi->spoofchk = vf->spoofchk ? 1 : 0;
2625        ivi->linkstate = vf->link_cfg;
2626        if (vf->state == VF_ENABLED) {
2627                /* mac and vlan are in vlan_mac objects */
2628                if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
2629                        mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
2630                                                0, ETH_ALEN);
2631                        vlan_obj->get_n_elements(bp, vlan_obj, 1,
2632                                                 (u8 *)&ivi->vlan, 0,
2633                                                 VLAN_HLEN);
2634                }
2635        } else {
2636                mutex_lock(&bp->vfdb->bulletin_mutex);
2637                /* mac */
2638                if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
2639                        /* mac configured by ndo so it's in bulletin board */
2640                        memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
2641                else
2642                        /* function has not been loaded yet. Show mac as 0s */
2643                        eth_zero_addr(ivi->mac);
2644
2645                /* vlan */
2646                if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2647                        /* vlan configured by ndo so it's in bulletin board */
2648                        memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
2649                else
2650                        /* function has not been loaded yet. Show vlans as 0s */
2651                        memset(&ivi->vlan, 0, VLAN_HLEN);
2652
2653                mutex_unlock(&bp->vfdb->bulletin_mutex);
2654        }
2655
2656        return 0;
2657}
2658
2659/* New mac for VF. Consider these cases:
2660 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
2661 *    supply at acquire.
2662 * 2. VF has already been acquired but has not yet initialized - store in local
2663 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
2664 *    will configure this mac when it is ready.
2665 * 3. VF has already initialized but has not yet setup a queue - post the new
2666 *    mac on VF's bulletin board right now. VF will configure this mac when it
2667 *    is ready.
2668 * 4. VF has already set a queue - delete any macs already configured for this
2669 *    queue and manually config the new mac.
2670 * In any event, once this function has been called, refuse any attempts by the
2671 * VF to configure any mac for itself except for this mac. In case of a race
2672 * where the VF fails to see the new post on its bulletin board before sending a
2673 * mac configuration request, the PF will simply fail the request and VF can try
2674 * again after consulting its bulletin board.
2675 */
2676int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
2677{
2678        struct bnx2x *bp = netdev_priv(dev);
2679        int rc, q_logical_state;
2680        struct bnx2x_virtf *vf = NULL;
2681        struct pf_vf_bulletin_content *bulletin = NULL;
2682
2683        if (!is_valid_ether_addr(mac)) {
2684                BNX2X_ERR("mac address invalid\n");
2685                return -EINVAL;
2686        }
2687
2688        /* sanity and init */
2689        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2690        if (rc)
2691                return rc;
2692
2693        mutex_lock(&bp->vfdb->bulletin_mutex);
2694
2695        /* update PF's copy of the VF's bulletin. Will no longer accept mac
2696         * configuration requests from the vf unless they match this mac
2697         */
2698        bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
2699        memcpy(bulletin->mac, mac, ETH_ALEN);
2700
2701        /* Post update on VF's bulletin board */
2702        rc = bnx2x_post_vf_bulletin(bp, vfidx);
2703
2704        /* release lock before checking return code */
2705        mutex_unlock(&bp->vfdb->bulletin_mutex);
2706
2707        if (rc) {
2708                BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2709                return rc;
2710        }
2711
2712        q_logical_state =
2713                bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
2714        if (vf->state == VF_ENABLED &&
2715            q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
2716                /* configure the mac in device on this vf's queue */
2717                unsigned long ramrod_flags = 0;
2718                struct bnx2x_vlan_mac_obj *mac_obj;
2719
2720                /* User should be able to see failure reason in system logs */
2721                if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2722                        return -EINVAL;
2723
2724                /* must lock vfpf channel to protect against vf flows */
2725                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2726
2727                /* remove existing eth macs */
2728                mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2729                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
2730                if (rc) {
2731                        BNX2X_ERR("failed to delete eth macs\n");
2732                        rc = -EINVAL;
2733                        goto out;
2734                }
2735
2736                /* remove existing uc list macs */
2737                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
2738                if (rc) {
2739                        BNX2X_ERR("failed to delete uc_list macs\n");
2740                        rc = -EINVAL;
2741                        goto out;
2742                }
2743
2744                /* configure the new mac to device */
2745                __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2746                bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
2747                                  BNX2X_ETH_MAC, &ramrod_flags);
2748
2749out:
2750                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2751        }
2752
2753        return rc;
2754}
2755
2756static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
2757                                         struct bnx2x_virtf *vf, bool accept)
2758{
2759        struct bnx2x_rx_mode_ramrod_params rx_ramrod;
2760        unsigned long accept_flags;
2761
2762        /* need to remove/add the VF's accept_any_vlan bit */
2763        accept_flags = bnx2x_leading_vfq(vf, accept_flags);
2764        if (accept)
2765                set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2766        else
2767                clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2768
2769        bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
2770                              accept_flags);
2771        bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
2772        bnx2x_config_rx_mode(bp, &rx_ramrod);
2773}
2774
2775static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
2776                                    u16 vlan, bool add)
2777{
2778        struct bnx2x_vlan_mac_ramrod_params ramrod_param;
2779        unsigned long ramrod_flags = 0;
2780        int rc = 0;
2781
2782        /* configure the new vlan to device */
2783        memset(&ramrod_param, 0, sizeof(ramrod_param));
2784        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2785        ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2786        ramrod_param.ramrod_flags = ramrod_flags;
2787        ramrod_param.user_req.u.vlan.vlan = vlan;
2788        ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
2789                                        : BNX2X_VLAN_MAC_DEL;
2790        rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
2791        if (rc) {
2792                BNX2X_ERR("failed to configure vlan\n");
2793                return -EINVAL;
2794        }
2795
2796        return 0;
2797}
2798
2799int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
2800                      __be16 vlan_proto)
2801{
2802        struct pf_vf_bulletin_content *bulletin = NULL;
2803        struct bnx2x *bp = netdev_priv(dev);
2804        struct bnx2x_vlan_mac_obj *vlan_obj;
2805        unsigned long vlan_mac_flags = 0;
2806        unsigned long ramrod_flags = 0;
2807        struct bnx2x_virtf *vf = NULL;
2808        int i, rc;
2809
2810        if (vlan > 4095) {
2811                BNX2X_ERR("illegal vlan value %d\n", vlan);
2812                return -EINVAL;
2813        }
2814
2815        if (vlan_proto != htons(ETH_P_8021Q))
2816                return -EPROTONOSUPPORT;
2817
2818        DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
2819           vfidx, vlan, 0); /* qos is accepted but not applied */
2820
2821        /* sanity and init */
2822        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2823        if (rc)
2824                return rc;
2825
2826        /* update PF's copy of the VF's bulletin. There is no point in
2827         * posting the vlan to the VF, since it has nothing to do with it.
2828         * But it is useful to store it here so that if the VF is not up
2829         * yet we can configure the vlan later, when it comes up. Treat
2830         * vlan id 0 as a request to remove the Host tag.
2831         */
2832        mutex_lock(&bp->vfdb->bulletin_mutex);
2833
2834        if (vlan > 0)
2835                bulletin->valid_bitmap |= 1 << VLAN_VALID;
2836        else
2837                bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
2838        bulletin->vlan = vlan;
2839
2840        /* Post update on VF's bulletin board */
2841        rc = bnx2x_post_vf_bulletin(bp, vfidx);
2842        if (rc)
2843                BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2844        mutex_unlock(&bp->vfdb->bulletin_mutex);
2845
2846        /* is vf initialized and queue set up? */
2847        if (vf->state != VF_ENABLED ||
2848            bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2849            BNX2X_Q_LOGICAL_STATE_ACTIVE)
2850                return rc;
2851
2852        /* User should be able to see the error in the system logs */
2853        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2854                return -EINVAL;
2855
2856        /* must lock vfpf channel to protect against vf flows */
2857        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2858
2859        /* remove existing vlans */
2860        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2861        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2862        rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
2863                                  &ramrod_flags);
2864        if (rc) {
2865                BNX2X_ERR("failed to delete vlans\n");
2866                rc = -EINVAL;
2867                goto out;
2868        }
2869
2870        /* clear accept_any_vlan when the HV forces a vlan; otherwise set
2871         * it according to the VF's vlan-filtering capability
2872         */
2873        if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
2874                bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
2875
2876        rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
2877        if (rc)
2878                goto out;
2879
2880        /* send queue update ramrods to configure default vlan and
2881         * silent vlan removal
2882         */
2883        for_each_vfq(vf, i) {
2884                struct bnx2x_queue_state_params q_params = {NULL};
2885                struct bnx2x_queue_update_params *update_params;
2886
2887                q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
2888
2889                /* validate the Q is UP */
2890                if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
2891                    BNX2X_Q_LOGICAL_STATE_ACTIVE)
2892                        continue;
2893
2894                __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2895                q_params.cmd = BNX2X_Q_CMD_UPDATE;
2896                update_params = &q_params.params.update;
2897                __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
2898                          &update_params->update_flags);
2899                __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
2900                          &update_params->update_flags);
2901                if (vlan == 0) {
2902                        /* if vlan is 0 then we want to leave the VF traffic
2903                         * untagged, and leave the incoming traffic untouched
2904                         * (i.e. do not remove any vlan tags).
2905                         */
2906                        __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2907                                    &update_params->update_flags);
2908                        __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2909                                    &update_params->update_flags);
2910                } else {
2911                        /* configure default vlan to vf queue and set silent
2912                         * vlan removal (the vf remains unaware of this vlan).
2913                         */
2914                        __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2915                                  &update_params->update_flags);
2916                        __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2917                                  &update_params->update_flags);
2918                        update_params->def_vlan = vlan;
2919                        update_params->silent_removal_value =
2920                                vlan & VLAN_VID_MASK;
2921                        update_params->silent_removal_mask = VLAN_VID_MASK;
2922                }
2923
2924                /* Update the Queue state */
2925                rc = bnx2x_queue_state_change(bp, &q_params);
2926                if (rc) {
2927                        BNX2X_ERR("Failed to configure default VLAN queue %d\n",
2928                                  i);
2929                        goto out;
2930                }
2931        }
2932out:
2933        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2934
2935        if (!rc)
2936                DP(BNX2X_MSG_IOV,
2937                   "updated VF[%d] vlan configuration (vlan = %d)\n",
2938                   vfidx, vlan);
2939
2940        return rc;
2941}
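/* An illustrative sketch (not taken from this file): the ndo callbacks
 * implemented here are reached through net_device_ops, e.g.
 * "ip link set <dev> vf <n> vlan <vid>" lands in bnx2x_set_vf_vlan() via
 * .ndo_set_vf_vlan. The compiled-out excerpt below is hypothetical; the
 * real ops table lives elsewhere in the driver.
 */
#if 0
static const struct net_device_ops bnx2x_sriov_ndo_sketch = {
        .ndo_set_vf_mac         = bnx2x_set_vf_mac,
        .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
        .ndo_set_vf_spoofchk    = bnx2x_set_vf_spoofchk,
};
#endif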
2942
2943int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val)
2944{
2945        struct bnx2x *bp = netdev_priv(dev);
2946        struct bnx2x_virtf *vf;
2947        int i, rc = 0;
2948
2949        vf = BP_VF(bp, idx);
2950        if (!vf)
2951                return -EINVAL;
2952
2953        /* nothing to do */
2954        if (vf->spoofchk == val)
2955                return 0;
2956
2957        vf->spoofchk = val ? 1 : 0;
2958
2959        DP(BNX2X_MSG_IOV, "%s spoofchk for VF %d\n",
2960           val ? "enabling" : "disabling", idx);
2961
2962        /* is vf initialized and queue set up? */
2963        if (vf->state != VF_ENABLED ||
2964            bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2965            BNX2X_Q_LOGICAL_STATE_ACTIVE)
2966                return rc;
2967
2968        /* User should be able to see the error in the system logs */
2969        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2970                return -EINVAL;
2971
2972        /* send queue update ramrods to configure spoofchk */
2973        for_each_vfq(vf, i) {
2974                struct bnx2x_queue_state_params q_params = {NULL};
2975                struct bnx2x_queue_update_params *update_params;
2976
2977                q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
2978
2979                /* validate the Q is UP */
2980                if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
2981                    BNX2X_Q_LOGICAL_STATE_ACTIVE)
2982                        continue;
2983
2984                __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2985                q_params.cmd = BNX2X_Q_CMD_UPDATE;
2986                update_params = &q_params.params.update;
2987                __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
2988                          &update_params->update_flags);
2989                if (val) {
2990                        __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
2991                                  &update_params->update_flags);
2992                } else {
2993                        __clear_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
2994                                    &update_params->update_flags);
2995                }
2996
2997                /* Update the Queue state */
2998                rc = bnx2x_queue_state_change(bp, &q_params);
2999                if (rc) {
3000                        BNX2X_ERR("Failed to %s spoofchk on VF %d - vfq %d\n",
3001                                  val ? "enable" : "disable", idx, i);
3002                        goto out;
3003                }
3004        }
3005out:
3006        if (!rc)
3007                DP(BNX2X_MSG_IOV,
3008                   "%s spoofchk for VF[%d]\n", val ? "Enabled" : "Disabled",
3009                   idx);
3010
3011        return rc;
3012}
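/* A compiled-out sketch of the flag convention used by the queue-update
 * ramrod above: a *_CHNG bit marks an attribute as "to be (re)applied" and
 * the companion value bit carries the new setting; attributes whose *_CHNG
 * bit is clear are left untouched. The helper name is hypothetical.
 */
#if 0
static void bnx2x_q_update_anti_spoof_sketch(
        struct bnx2x_queue_update_params *params, bool enable)
{
        __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
        if (enable)
                __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
        else
                __clear_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
}
#endif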
3013
3014/* crc is the first field in the bulletin board. Compute the crc over the
3015 * entire bulletin board excluding the crc field itself. Use the length field
3016 * because the Bulletin Board may have been posted by a PF running a different
3017 * version than the VF that samples it. Therefore, the length is computed by
3018 * the PF and then used blindly by the VF.
3019 */
3020u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
3021{
3022        return crc32(BULLETIN_CRC_SEED,
3023                     ((u8 *)bulletin) + sizeof(bulletin->crc),
3024                     bulletin->length - sizeof(bulletin->crc));
3025}
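/* A hedged sketch of the posting side, assuming the PF seals the bulletin
 * before DMAing it to the VF: the length is fixed first (the VF trusts it
 * blindly, per the comment above) and the crc is computed last, over every
 * field that follows it. This illustrates the ordering only and is not the
 * body of bnx2x_post_vf_bulletin(); BULLETIN_CONTENT_SIZE is assumed to be
 * sizeof(struct pf_vf_bulletin_content).
 */
#if 0
static void bnx2x_bulletin_seal_sketch(struct pf_vf_bulletin_content *bulletin)
{
        bulletin->length = BULLETIN_CONTENT_SIZE;
        /* crc must be computed after every other field is final */
        bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
}
#endif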
3026
3027/* Check for new posts on the bulletin board */
3028enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3029{
3030        struct pf_vf_bulletin_content *bulletin;
3031        int attempts;
3032
3033        /* sampling the structure in mid-post may result in corrupted data;
3034         * validate the crc to ensure coherency.
3035         */
3036        for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
3037                u32 crc;
3038
3039                /* sample the bulletin board */
3040                memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
3041                       sizeof(union pf_vf_bulletin));
3042
3043                crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
3044
3045                if (bp->shadow_bulletin.content.crc == crc)
3046                        break;
3047
3048                BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3049                          bp->shadow_bulletin.content.crc, crc);
3050        }
3051
3052        if (attempts >= BULLETIN_ATTEMPTS) {
3053                BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3054                          attempts);
3055                return PFVF_BULLETIN_CRC_ERR;
3056        }
3057        bulletin = &bp->shadow_bulletin.content;
3058
3059        /* bulletin board hasn't changed since last sample */
3060        if (bp->old_bulletin.version == bulletin->version)
3061                return PFVF_BULLETIN_UNCHANGED;
3062
3063        /* the mac address in the bulletin board is valid and is new */
3064        if ((bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) &&
3065            !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
3066                /* copy the new mac to the net device */
3067                memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
3068        }
3069
3070        if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
3071                DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
3072                   bulletin->link_speed, bulletin->link_flags);
3073
3074                bp->vf_link_vars.line_speed = bulletin->link_speed;
3075                bp->vf_link_vars.link_report_flags = 0;
3076                /* Link is down */
3077                if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
3078                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
3079                                  &bp->vf_link_vars.link_report_flags);
3080                /* Full DUPLEX */
3081                if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
3082                        __set_bit(BNX2X_LINK_REPORT_FD,
3083                                  &bp->vf_link_vars.link_report_flags);
3084                /* Rx Flow Control is ON */
3085                if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
3086                        __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
3087                                  &bp->vf_link_vars.link_report_flags);
3088                /* Tx Flow Control is ON */
3089                if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
3090                        __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
3091                                  &bp->vf_link_vars.link_report_flags);
3092                __bnx2x_link_report(bp);
3093        }
3094
3095        /* copy new bulletin board to bp */
3096        memcpy(&bp->old_bulletin, bulletin,
3097               sizeof(struct pf_vf_bulletin_content));
3098
3099        return PFVF_BULLETIN_UPDATED;
3100}
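/* A compiled-out sketch of how a caller is expected to consume the
 * tri-state result above; the function name is hypothetical.
 */
#if 0
static void bnx2x_sample_and_react_sketch(struct bnx2x *bp)
{
        switch (bnx2x_sample_bulletin(bp)) {
        case PFVF_BULLETIN_UPDATED:
                /* new post; mac/link state were already absorbed above */
                break;
        case PFVF_BULLETIN_UNCHANGED:
                /* same version as the last sample; nothing to do */
                break;
        case PFVF_BULLETIN_CRC_ERR:
                /* no coherent snapshot after BULLETIN_ATTEMPTS tries */
                break;
        }
}
#endif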
3101
3102void bnx2x_timer_sriov(struct bnx2x *bp)
3103{
3104        bnx2x_sample_bulletin(bp);
3105
3106        /* if the channel is down we need to self-destruct */
3107        if (bp->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN))
3108                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3109                                       BNX2X_MSG_IOV);
3110}
3111
3112void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3113{
3114        /* vf doorbells are embedded within the regview */
3115        return bp->regview + PXP_VF_ADDR_DB_START;
3116}
3117
3118void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
3119{
3120        BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3121                       sizeof(struct bnx2x_vf_mbx_msg));
3122        BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3123                       sizeof(union pf_vf_bulletin));
3124}
3125
3126int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3127{
3128        mutex_init(&bp->vf2pf_mutex);
3129
3130        /* allocate vf2pf mailbox for vf to pf channel */
3131        bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3132                                         sizeof(struct bnx2x_vf_mbx_msg));
3133        if (!bp->vf2pf_mbox)
3134                goto alloc_mem_err;
3135
3136        /* allocate pf-to-vf bulletin board */
3137        bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3138                                             sizeof(union pf_vf_bulletin));
3139        if (!bp->pf2vf_bulletin)
3140                goto alloc_mem_err;
3141
3142        bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
3143
3144        return 0;
3145
3146alloc_mem_err:
3147        bnx2x_vf_pci_dealloc(bp);
3148        return -ENOMEM;
3149}
3150
3151void bnx2x_iov_channel_down(struct bnx2x *bp)
3152{
3153        int vf_idx;
3154        struct pf_vf_bulletin_content *bulletin;
3155
3156        if (!IS_SRIOV(bp))
3157                return;
3158
3159        for_each_vf(bp, vf_idx) {
3160                /* locate this VF's bulletin board and update the channel
3161                 * down bit
3162                 */
3163                bulletin = BP_VF_BULLETIN(bp, vf_idx);
3164                bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3165
3166                /* update vf bulletin board */
3167                bnx2x_post_vf_bulletin(bp, vf_idx);
3168        }
3169}
3170
3171void bnx2x_iov_task(struct work_struct *work)
3172{
3173        struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
3174
3175        if (!netif_running(bp->dev))
3176                return;
3177
3178        if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
3179                               &bp->iov_task_state))
3180                bnx2x_vf_handle_flr_event(bp);
3181
3182        if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
3183                               &bp->iov_task_state))
3184                bnx2x_vf_mbx(bp);
3185}
3186
3187void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
3188{
3189        smp_mb__before_atomic();
3190        set_bit(flag, &bp->iov_task_state);
3191        smp_mb__after_atomic();
3192        DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3193        queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
3194}
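/* A hypothetical producer-side sketch: event handlers do not process VF
 * work inline; they set a state bit and kick the delayed work, which
 * bnx2x_iov_task() above then drains. The call site shown is illustrative;
 * the real ones live in the event-queue and mailbox paths.
 */
#if 0
static void bnx2x_vf_event_sketch(struct bnx2x *bp, bool is_flr)
{
        bnx2x_schedule_iov_task(bp, is_flr ? BNX2X_IOV_HANDLE_FLR
                                           : BNX2X_IOV_HANDLE_VF_MSG);
}
#endif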
3195