linux/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice.h"
   5#include "ice_base.h"
   6#include "ice_lib.h"
   7#include "ice_fltr.h"
   8#include "ice_flow.h"
   9#include "ice_virtchnl_allowlist.h"
  10
  11#define FIELD_SELECTOR(proto_hdr_field) \
  12                BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
  13
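/* Usage sketch: each VIRTCHNL_PROTO_HDR_*_* field enum encodes its header
 * type in the high bits and its field index within that header in the low
 * bits (PROTO_HDR_FIELD_MASK). FIELD_SELECTOR() keeps only the field index
 * and converts it into a single selector bit, so a combined selector for
 * several fields is built by OR-ing entries together, e.g.
 *
 *	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
 *	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST)
 *
 * which is the form used throughout the match tables below.
 */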
  14struct ice_vc_hdr_match_type {
  15        u32 vc_hdr;     /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
  16        u32 ice_hdr;    /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
  17};
  18
  19static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = {
  20        {VIRTCHNL_PROTO_HDR_NONE,       ICE_FLOW_SEG_HDR_NONE},
  21        {VIRTCHNL_PROTO_HDR_IPV4,       ICE_FLOW_SEG_HDR_IPV4 |
  22                                        ICE_FLOW_SEG_HDR_IPV_OTHER},
  23        {VIRTCHNL_PROTO_HDR_IPV6,       ICE_FLOW_SEG_HDR_IPV6 |
  24                                        ICE_FLOW_SEG_HDR_IPV_OTHER},
  25        {VIRTCHNL_PROTO_HDR_TCP,        ICE_FLOW_SEG_HDR_TCP},
  26        {VIRTCHNL_PROTO_HDR_UDP,        ICE_FLOW_SEG_HDR_UDP},
  27        {VIRTCHNL_PROTO_HDR_SCTP,       ICE_FLOW_SEG_HDR_SCTP},
  28};
  29
  30static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = {
  31        {VIRTCHNL_PROTO_HDR_NONE,       ICE_FLOW_SEG_HDR_NONE},
  32        {VIRTCHNL_PROTO_HDR_ETH,        ICE_FLOW_SEG_HDR_ETH},
  33        {VIRTCHNL_PROTO_HDR_S_VLAN,     ICE_FLOW_SEG_HDR_VLAN},
  34        {VIRTCHNL_PROTO_HDR_C_VLAN,     ICE_FLOW_SEG_HDR_VLAN},
  35        {VIRTCHNL_PROTO_HDR_IPV4,       ICE_FLOW_SEG_HDR_IPV4 |
  36                                        ICE_FLOW_SEG_HDR_IPV_OTHER},
  37        {VIRTCHNL_PROTO_HDR_IPV6,       ICE_FLOW_SEG_HDR_IPV6 |
  38                                        ICE_FLOW_SEG_HDR_IPV_OTHER},
  39        {VIRTCHNL_PROTO_HDR_TCP,        ICE_FLOW_SEG_HDR_TCP},
  40        {VIRTCHNL_PROTO_HDR_UDP,        ICE_FLOW_SEG_HDR_UDP},
  41        {VIRTCHNL_PROTO_HDR_SCTP,       ICE_FLOW_SEG_HDR_SCTP},
  42        {VIRTCHNL_PROTO_HDR_PPPOE,      ICE_FLOW_SEG_HDR_PPPOE},
  43        {VIRTCHNL_PROTO_HDR_GTPU_IP,    ICE_FLOW_SEG_HDR_GTPU_IP},
  44        {VIRTCHNL_PROTO_HDR_GTPU_EH,    ICE_FLOW_SEG_HDR_GTPU_EH},
  45        {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
  46                                        ICE_FLOW_SEG_HDR_GTPU_DWN},
  47        {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
  48                                        ICE_FLOW_SEG_HDR_GTPU_UP},
  49        {VIRTCHNL_PROTO_HDR_L2TPV3,     ICE_FLOW_SEG_HDR_L2TPV3},
  50        {VIRTCHNL_PROTO_HDR_ESP,        ICE_FLOW_SEG_HDR_ESP},
  51        {VIRTCHNL_PROTO_HDR_AH,         ICE_FLOW_SEG_HDR_AH},
  52        {VIRTCHNL_PROTO_HDR_PFCP,       ICE_FLOW_SEG_HDR_PFCP_SESSION},
  53};
  54
  55struct ice_vc_hash_field_match_type {
  56        u32 vc_hdr;             /* virtchnl headers
  57                                 * (VIRTCHNL_PROTO_HDR_XXX)
  58                                 */
  59        u32 vc_hash_field;      /* virtchnl hash fields selector
  60                                 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
  61                                 */
  62        u64 ice_hash_field;     /* ice hash fields
  63                                 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
  64                                 */
  65};
  66
  67static const struct
  68ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = {
  69        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
  70                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
  71        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
  72                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
  73        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
  74                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
  75                ICE_FLOW_HASH_IPV4},
  76        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
  77                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
  78                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
  79                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
  80        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
  81                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
  82                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
  83                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
  84        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
  85                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
  86                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
  87                ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
  88        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
  89                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
  90        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
  91                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
  92        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
  93                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
  94        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
  95                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
  96                ICE_FLOW_HASH_IPV6},
  97        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
  98                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
  99                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
 100                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
 101        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
 102                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
 103                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
 104                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
 105        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
 106                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
 107                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
 108                ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
 109        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
 110                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
 111        {VIRTCHNL_PROTO_HDR_TCP,
 112                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
 113                BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
 114        {VIRTCHNL_PROTO_HDR_TCP,
 115                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
 116                BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
 117        {VIRTCHNL_PROTO_HDR_TCP,
 118                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
 119                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
 120                ICE_FLOW_HASH_TCP_PORT},
 121        {VIRTCHNL_PROTO_HDR_UDP,
 122                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
 123                BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
 124        {VIRTCHNL_PROTO_HDR_UDP,
 125                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
 126                BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
 127        {VIRTCHNL_PROTO_HDR_UDP,
 128                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
 129                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
 130                ICE_FLOW_HASH_UDP_PORT},
 131        {VIRTCHNL_PROTO_HDR_SCTP,
 132                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
 133                BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
 134        {VIRTCHNL_PROTO_HDR_SCTP,
 135                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
 136                BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
 137        {VIRTCHNL_PROTO_HDR_SCTP,
 138                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
 139                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
 140                ICE_FLOW_HASH_SCTP_PORT},
 141};
 142
 143static const struct
 144ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
 145        {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
 146                BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
 147        {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
 148                BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
 149        {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
 150                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
 151                ICE_FLOW_HASH_ETH},
 152        {VIRTCHNL_PROTO_HDR_ETH,
 153                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
 154                BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
 155        {VIRTCHNL_PROTO_HDR_S_VLAN,
 156                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
 157                BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
 158        {VIRTCHNL_PROTO_HDR_C_VLAN,
 159                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
 160                BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
 161        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
 162                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
 163        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
 164                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
 165        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
 166                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
 167                ICE_FLOW_HASH_IPV4},
 168        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
 169                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
 170                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 171                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
 172        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
 173                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
 174                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
 175                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
 176        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
 177                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
 178                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
 179                ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
 180        {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
 181                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
 182        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
 183                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
 184        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
 185                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
 186        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
 187                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
 188                ICE_FLOW_HASH_IPV6},
 189        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
 190                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
 191                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
 192                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
 193        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
 194                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
 195                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
 196                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
 197        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
 198                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
 199                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
 200                ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
 201        {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
 202                BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
 203        {VIRTCHNL_PROTO_HDR_TCP,
 204                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
 205                BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
 206        {VIRTCHNL_PROTO_HDR_TCP,
 207                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
 208                BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
 209        {VIRTCHNL_PROTO_HDR_TCP,
 210                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
 211                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
 212                ICE_FLOW_HASH_TCP_PORT},
 213        {VIRTCHNL_PROTO_HDR_UDP,
 214                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
 215                BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
 216        {VIRTCHNL_PROTO_HDR_UDP,
 217                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
 218                BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
 219        {VIRTCHNL_PROTO_HDR_UDP,
 220                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
 221                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
 222                ICE_FLOW_HASH_UDP_PORT},
 223        {VIRTCHNL_PROTO_HDR_SCTP,
 224                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
 225                BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
 226        {VIRTCHNL_PROTO_HDR_SCTP,
 227                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
 228                BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
 229        {VIRTCHNL_PROTO_HDR_SCTP,
 230                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
 231                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
 232                ICE_FLOW_HASH_SCTP_PORT},
 233        {VIRTCHNL_PROTO_HDR_PPPOE,
 234                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
 235                BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
 236        {VIRTCHNL_PROTO_HDR_GTPU_IP,
 237                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
 238                BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
 239        {VIRTCHNL_PROTO_HDR_L2TPV3,
 240                FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
 241                BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
 242        {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
 243                BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
 244        {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
 245                BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
 246        {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
 247                BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
 248};
 249
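/* The lookup tables above are consulted when servicing a VF's RSS
 * configuration requests (VIRTCHNL_OP_ADD_RSS_CFG / VIRTCHNL_OP_DEL_RSS_CFG):
 * the requested protocol header is matched against vc_hdr to obtain the ice
 * flow-segment header bits, and the VF's field-selector bitmap is matched
 * against vc_hash_field to obtain the ice hash-field bitmap. As an
 * illustrative example, an IPv4 request with both the SRC and DST selectors
 * set resolves to ICE_FLOW_HASH_IPV4. The _os tables cover the default OS
 * DDP package, while the larger _comms tables apply when the comms DDP
 * package is active.
 */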
 250/**
 251 * ice_get_vf_vsi - get VF's VSI based on the stored index
 252 * @vf: VF used to get VSI
 253 */
 254static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
 255{
 256        return vf->pf->vsi[vf->lan_vsi_idx];
 257}
 258
 259/**
 260 * ice_validate_vf_id - helper to check if VF ID is valid
 261 * @pf: pointer to the PF structure
 262 * @vf_id: the ID of the VF to check
 263 */
 264static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
 265{
  266        /* vf_id is unsigned and at most 255, so only the upper bound needs checking */
 267        if (vf_id >= pf->num_alloc_vfs) {
 268                dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
 269                return -EINVAL;
 270        }
 271        return 0;
 272}
 273
 274/**
 275 * ice_check_vf_init - helper to check if VF init complete
 276 * @pf: pointer to the PF structure
 277 * @vf: the pointer to the VF to check
 278 */
 279static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
 280{
 281        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
 282                dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
 283                        vf->vf_id);
 284                return -EBUSY;
 285        }
 286        return 0;
 287}
 288
 289/**
 290 * ice_err_to_virt_err - translate errors for VF return code
 291 * @ice_err: error return code
 292 */
 293static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
 294{
 295        switch (ice_err) {
 296        case ICE_SUCCESS:
 297                return VIRTCHNL_STATUS_SUCCESS;
 298        case ICE_ERR_BAD_PTR:
 299        case ICE_ERR_INVAL_SIZE:
 300        case ICE_ERR_DEVICE_NOT_SUPPORTED:
 301        case ICE_ERR_PARAM:
 302        case ICE_ERR_CFG:
 303                return VIRTCHNL_STATUS_ERR_PARAM;
 304        case ICE_ERR_NO_MEMORY:
 305                return VIRTCHNL_STATUS_ERR_NO_MEMORY;
 306        case ICE_ERR_NOT_READY:
 307        case ICE_ERR_RESET_FAILED:
 308        case ICE_ERR_FW_API_VER:
 309        case ICE_ERR_AQ_ERROR:
 310        case ICE_ERR_AQ_TIMEOUT:
 311        case ICE_ERR_AQ_FULL:
 312        case ICE_ERR_AQ_NO_WORK:
 313        case ICE_ERR_AQ_EMPTY:
 314                return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
 315        default:
 316                return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
 317        }
 318}
 319
 320/**
 321 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 322 * @pf: pointer to the PF structure
 323 * @v_opcode: operation code
 324 * @v_retval: return value
 325 * @msg: pointer to the msg buffer
 326 * @msglen: msg length
 327 */
 328static void
 329ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
 330                    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
 331{
 332        struct ice_hw *hw = &pf->hw;
 333        unsigned int i;
 334
 335        ice_for_each_vf(pf, i) {
 336                struct ice_vf *vf = &pf->vf[i];
 337
  338                /* Not all VFs are enabled so skip the ones that are not */
 339                if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
 340                    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
 341                        continue;
 342
 343                /* Ignore return value on purpose - a given VF may fail, but
 344                 * we need to keep going and send to all of them
 345                 */
 346                ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
 347                                      msglen, NULL);
 348        }
 349}
 350
 351/**
 352 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 353 * @vf: pointer to the VF structure
 354 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 355 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
  356 * @link_up: true to report link up, false to report link down
 357 */
 358static void
 359ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
 360                 int ice_link_speed, bool link_up)
 361{
 362        if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
 363                pfe->event_data.link_event_adv.link_status = link_up;
 364                /* Speed in Mbps */
 365                pfe->event_data.link_event_adv.link_speed =
 366                        ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
 367        } else {
 368                pfe->event_data.link_event.link_status = link_up;
 369                /* Legacy method for virtchnl link speeds */
 370                pfe->event_data.link_event.link_speed =
 371                        (enum virtchnl_link_speed)
 372                        ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
 373        }
 374}
 375
 376/**
 377 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 378 * @vf: the VF to check
 379 *
 380 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 381 * otherwise
 382 */
 383static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
 384{
 385        return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
 386                !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
 387}
 388
 389/**
 390 * ice_is_vf_link_up - check if the VF's link is up
 391 * @vf: VF to check if link is up
 392 */
 393static bool ice_is_vf_link_up(struct ice_vf *vf)
 394{
 395        struct ice_pf *pf = vf->pf;
 396
 397        if (ice_check_vf_init(pf, vf))
 398                return false;
 399
 400        if (ice_vf_has_no_qs_ena(vf))
 401                return false;
 402        else if (vf->link_forced)
 403                return vf->link_up;
 404        else
 405                return pf->hw.port_info->phy.link_info.link_info &
 406                        ICE_AQ_LINK_UP;
 407}
 408
 409/**
 410 * ice_vc_notify_vf_link_state - Inform a VF of link status
 411 * @vf: pointer to the VF structure
 412 *
 413 * send a link status message to a single VF
 414 */
 415static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
 416{
 417        struct virtchnl_pf_event pfe = { 0 };
 418        struct ice_hw *hw = &vf->pf->hw;
 419
 420        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
 421        pfe.severity = PF_EVENT_SEVERITY_INFO;
 422
 423        if (ice_is_vf_link_up(vf))
 424                ice_set_pfe_link(vf, &pfe,
 425                                 hw->port_info->phy.link_info.link_speed, true);
 426        else
 427                ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
 428
 429        ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
 430                              VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
 431                              sizeof(pfe), NULL);
 432}
 433
 434/**
 435 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 436 * @vf: VF to remove access to VSI for
 437 */
 438static void ice_vf_invalidate_vsi(struct ice_vf *vf)
 439{
 440        vf->lan_vsi_idx = ICE_NO_VSI;
 441        vf->lan_vsi_num = ICE_NO_VSI;
 442}
 443
 444/**
 445 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 446 * @vf: invalidate this VF's VSI after freeing it
 447 */
 448static void ice_vf_vsi_release(struct ice_vf *vf)
 449{
 450        ice_vsi_release(ice_get_vf_vsi(vf));
 451        ice_vf_invalidate_vsi(vf);
 452}
 453
 454/**
 455 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 456 * @vf: VF that control VSI is being invalidated on
 457 */
 458static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
 459{
 460        vf->ctrl_vsi_idx = ICE_NO_VSI;
 461}
 462
 463/**
 464 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
 465 * @vf: VF that control VSI is being released on
 466 */
 467static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
 468{
 469        ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
 470        ice_vf_ctrl_invalidate_vsi(vf);
 471}
 472
 473/**
 474 * ice_free_vf_res - Free a VF's resources
 475 * @vf: pointer to the VF info
 476 */
 477static void ice_free_vf_res(struct ice_vf *vf)
 478{
 479        struct ice_pf *pf = vf->pf;
 480        int i, last_vector_idx;
 481
  482        /* First, disable the VF's configuration API to prevent the OS from
  483         * accessing the VF's VSI after it's freed or invalidated.
 484         */
 485        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 486        ice_vf_fdir_exit(vf);
 487        /* free VF control VSI */
 488        if (vf->ctrl_vsi_idx != ICE_NO_VSI)
 489                ice_vf_ctrl_vsi_release(vf);
 490
 491        /* free VSI and disconnect it from the parent uplink */
 492        if (vf->lan_vsi_idx != ICE_NO_VSI) {
 493                ice_vf_vsi_release(vf);
 494                vf->num_mac = 0;
 495        }
 496
 497        last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
 498
 499        /* clear VF MDD event information */
 500        memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
 501        memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
 502
 503        /* Disable interrupts so that VF starts in a known state */
 504        for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
 505                wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
 506                ice_flush(&pf->hw);
 507        }
 508        /* reset some of the state variables keeping track of the resources */
 509        clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
 510        clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 511}
 512
 513/**
  514 * ice_dis_vf_mappings - disable the VF's MSIX and queue mappings in hardware
 515 * @vf: pointer to the VF structure
 516 */
 517static void ice_dis_vf_mappings(struct ice_vf *vf)
 518{
 519        struct ice_pf *pf = vf->pf;
 520        struct ice_vsi *vsi;
 521        struct device *dev;
 522        int first, last, v;
 523        struct ice_hw *hw;
 524
 525        hw = &pf->hw;
 526        vsi = ice_get_vf_vsi(vf);
 527
 528        dev = ice_pf_to_dev(pf);
 529        wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
 530        wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
 531
 532        first = vf->first_vector_idx;
 533        last = first + pf->num_msix_per_vf - 1;
 534        for (v = first; v <= last; v++) {
 535                u32 reg;
 536
 537                reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
 538                        GLINT_VECT2FUNC_IS_PF_M) |
 539                       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
 540                        GLINT_VECT2FUNC_PF_NUM_M));
 541                wr32(hw, GLINT_VECT2FUNC(v), reg);
 542        }
 543
 544        if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
 545                wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
 546        else
 547                dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
 548
 549        if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
 550                wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
 551        else
 552                dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
 553}
 554
 555/**
 556 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 557 * @pf: pointer to the PF structure
 558 *
  559 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
  560 * the pf->sriov_base_vector.
 561 *
 562 * Returns 0 on success, and -EINVAL on error.
 563 */
 564static int ice_sriov_free_msix_res(struct ice_pf *pf)
 565{
 566        struct ice_res_tracker *res;
 567
 568        if (!pf)
 569                return -EINVAL;
 570
 571        res = pf->irq_tracker;
 572        if (!res)
 573                return -EINVAL;
 574
 575        /* give back irq_tracker resources used */
 576        WARN_ON(pf->sriov_base_vector < res->num_entries);
 577
 578        pf->sriov_base_vector = 0;
 579
 580        return 0;
 581}
 582
 583/**
 584 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 585 * @vf: pointer to the VF structure
 586 */
 587void ice_set_vf_state_qs_dis(struct ice_vf *vf)
 588{
 589        /* Clear Rx/Tx enabled queues flag */
 590        bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
 591        bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
 592        clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
 593}
 594
 595/**
 596 * ice_dis_vf_qs - Disable the VF queues
 597 * @vf: pointer to the VF structure
 598 */
 599static void ice_dis_vf_qs(struct ice_vf *vf)
 600{
 601        struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 602
 603        ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
 604        ice_vsi_stop_all_rx_rings(vsi);
 605        ice_set_vf_state_qs_dis(vf);
 606}
 607
 608/**
 609 * ice_free_vfs - Free all VFs
 610 * @pf: pointer to the PF structure
 611 */
 612void ice_free_vfs(struct ice_pf *pf)
 613{
 614        struct device *dev = ice_pf_to_dev(pf);
 615        struct ice_hw *hw = &pf->hw;
 616        unsigned int tmp, i;
 617
 618        set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
 619
 620        if (!pf->vf)
 621                return;
 622
 623        while (test_and_set_bit(ICE_VF_DIS, pf->state))
 624                usleep_range(1000, 2000);
 625
 626        /* Disable IOV before freeing resources. This lets any VF drivers
 627         * running in the host get themselves cleaned up before we yank
 628         * the carpet out from underneath their feet.
 629         */
 630        if (!pci_vfs_assigned(pf->pdev))
 631                pci_disable_sriov(pf->pdev);
 632        else
 633                dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
 634
 635        /* Avoid wait time by stopping all VFs at the same time */
 636        ice_for_each_vf(pf, i)
 637                if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
 638                        ice_dis_vf_qs(&pf->vf[i]);
 639
 640        tmp = pf->num_alloc_vfs;
 641        pf->num_qps_per_vf = 0;
 642        pf->num_alloc_vfs = 0;
 643        for (i = 0; i < tmp; i++) {
 644                if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
 645                        /* disable VF qp mappings and set VF disable state */
 646                        ice_dis_vf_mappings(&pf->vf[i]);
 647                        set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
 648                        ice_free_vf_res(&pf->vf[i]);
 649                }
 650        }
 651
 652        if (ice_sriov_free_msix_res(pf))
 653                dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
 654
 655        devm_kfree(dev, pf->vf);
 656        pf->vf = NULL;
 657
 658        /* This check is for when the driver is unloaded while VFs are
 659         * assigned. Setting the number of VFs to 0 through sysfs is caught
 660         * before this function ever gets called.
 661         */
 662        if (!pci_vfs_assigned(pf->pdev)) {
 663                unsigned int vf_id;
 664
 665                /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
 666                 * work correctly when SR-IOV gets re-enabled.
 667                 */
 668                for (vf_id = 0; vf_id < tmp; vf_id++) {
 669                        u32 reg_idx, bit_idx;
 670
 671                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
 672                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
 673                        wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 674                }
 675        }
 676
 677        /* clear malicious info if the VFs are getting released */
 678        for (i = 0; i < tmp; i++)
 679                if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
 680                                        ICE_MAX_VF_COUNT, i))
 681                        dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
 682                                i);
 683
 684        clear_bit(ICE_VF_DIS, pf->state);
 685        clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
 686        clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
 687}
 688
 689/**
 690 * ice_trigger_vf_reset - Reset a VF on HW
 691 * @vf: pointer to the VF structure
 692 * @is_vflr: true if VFLR was issued, false if not
 693 * @is_pfr: true if the reset was triggered due to a previous PFR
 694 *
 695 * Trigger hardware to start a reset for a particular VF. Expects the caller
 696 * to wait the proper amount of time to allow hardware to reset the VF before
 697 * it cleans up and restores VF functionality.
 698 */
 699static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
 700{
 701        struct ice_pf *pf = vf->pf;
 702        u32 reg, reg_idx, bit_idx;
 703        unsigned int vf_abs_id, i;
 704        struct device *dev;
 705        struct ice_hw *hw;
 706
 707        dev = ice_pf_to_dev(pf);
 708        hw = &pf->hw;
 709        vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
 710
 711        /* Inform VF that it is no longer active, as a warning */
 712        clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
 713
 714        /* Disable VF's configuration API during reset. The flag is re-enabled
 715         * when it's safe again to access VF's VSI.
 716         */
 717        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 718
 719        /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
 720         * needs to clear them in the case of VFR/VFLR. If this is done for
 721         * PFR, it can mess up VF resets because the VF driver may already
 722         * have started cleanup by the time we get here.
 723         */
 724        if (!is_pfr) {
 725                wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
 726                wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
 727        }
 728
 729        /* In the case of a VFLR, the HW has already reset the VF and we
 730         * just need to clean up, so don't hit the VFRTRIG register.
 731         */
 732        if (!is_vflr) {
 733                /* reset VF using VPGEN_VFRTRIG reg */
 734                reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
 735                reg |= VPGEN_VFRTRIG_VFSWR_M;
 736                wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
 737        }
 738        /* clear the VFLR bit in GLGEN_VFLRSTAT */
 739        reg_idx = (vf_abs_id) / 32;
 740        bit_idx = (vf_abs_id) % 32;
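        /* e.g. with a hypothetical vf_base_id of 64 and vf_id of 5,
         * vf_abs_id is 69, so the flag sits in GLGEN_VFLRSTAT(2), bit 5
         * (69 / 32 = 2, 69 % 32 = 5)
         */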
 741        wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 742        ice_flush(hw);
 743
 744        wr32(hw, PF_PCI_CIAA,
 745             VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
 746        for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
 747                reg = rd32(hw, PF_PCI_CIAD);
 748                /* no transactions pending so stop polling */
 749                if ((reg & VF_TRANS_PENDING_M) == 0)
 750                        break;
 751
 752                dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
 753                udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
 754        }
 755}
 756
 757/**
 758 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 759 * @vsi: the VSI to update
 760 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
  761 * @enable: true to enable the PVID, false to disable it
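 *
 * The pvid_info value packs the VLAN ID into bits 0-11 and the QoS/priority
 * into bits 13-15 (VLAN_PRIO_SHIFT), so, as a hypothetical example, VLAN 100
 * with priority 3 is passed in as (3 << 13) | 100 = 0x6064.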
 762 */
 763static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
 764{
 765        struct ice_hw *hw = &vsi->back->hw;
 766        struct ice_aqc_vsi_props *info;
 767        struct ice_vsi_ctx *ctxt;
 768        enum ice_status status;
 769        int ret = 0;
 770
 771        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
 772        if (!ctxt)
 773                return -ENOMEM;
 774
 775        ctxt->info = vsi->info;
 776        info = &ctxt->info;
 777        if (enable) {
 778                info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
 779                        ICE_AQ_VSI_PVLAN_INSERT_PVID |
 780                        ICE_AQ_VSI_VLAN_EMOD_STR;
 781                info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
 782        } else {
 783                info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
 784                        ICE_AQ_VSI_VLAN_MODE_ALL;
 785                info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
 786        }
 787
 788        info->pvid = cpu_to_le16(pvid_info);
 789        info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
 790                                           ICE_AQ_VSI_PROP_SW_VALID);
 791
 792        status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
 793        if (status) {
 794                dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
 795                         ice_stat_str(status),
 796                         ice_aq_str(hw->adminq.sq_last_status));
 797                ret = -EIO;
 798                goto out;
 799        }
 800
 801        vsi->info.vlan_flags = info->vlan_flags;
 802        vsi->info.sw_flags2 = info->sw_flags2;
 803        vsi->info.pvid = info->pvid;
 804out:
 805        kfree(ctxt);
 806        return ret;
 807}
 808
 809/**
 810 * ice_vf_get_port_info - Get the VF's port info structure
 811 * @vf: VF used to get the port info structure for
 812 */
 813static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
 814{
 815        return vf->pf->hw.port_info;
 816}
 817
 818/**
 819 * ice_vf_vsi_setup - Set up a VF VSI
  820 * @vf: VF to set up the VSI for
 821 *
 822 * Returns pointer to the successfully allocated VSI struct on success,
 823 * otherwise returns NULL on failure.
 824 */
 825static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
 826{
 827        struct ice_port_info *pi = ice_vf_get_port_info(vf);
 828        struct ice_pf *pf = vf->pf;
 829        struct ice_vsi *vsi;
 830
 831        vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
 832
 833        if (!vsi) {
 834                dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
 835                ice_vf_invalidate_vsi(vf);
 836                return NULL;
 837        }
 838
 839        vf->lan_vsi_idx = vsi->idx;
 840        vf->lan_vsi_num = vsi->vsi_num;
 841
 842        return vsi;
 843}
 844
 845/**
 846 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
  847 * @vf: VF to set up the control VSI for
 848 *
 849 * Returns pointer to the successfully allocated VSI struct on success,
 850 * otherwise returns NULL on failure.
 851 */
 852struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
 853{
 854        struct ice_port_info *pi = ice_vf_get_port_info(vf);
 855        struct ice_pf *pf = vf->pf;
 856        struct ice_vsi *vsi;
 857
 858        vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
 859        if (!vsi) {
 860                dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
 861                ice_vf_ctrl_invalidate_vsi(vf);
 862        }
 863
 864        return vsi;
 865}
 866
 867/**
 868 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 869 * @pf: pointer to PF structure
 870 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 871 *
 872 * This returns the first MSIX vector index in PF space that is used by this VF.
 873 * This index is used when accessing PF relative registers such as
 874 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 875 * This will always be the OICR index in the AVF driver so any functionality
 876 * using vf->first_vector_idx for queue configuration will have to increment by
 877 * 1 to avoid meddling with the OICR index.
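 *
 * Worked example with hypothetical numbers: if sriov_base_vector is 96 and
 * num_msix_per_vf is 17, VF 2's first vector in PF space is 96 + 2 * 17 = 130;
 * that index is the VF's OICR, and its queue vectors start at 131.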
 878 */
 879static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
 880{
 881        return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
 882}
 883
 884/**
 885 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
  886 * @vf: VF to add VLAN filters for
 887 *
 888 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 889 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 890 */
 891static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
 892{
 893        struct device *dev = ice_pf_to_dev(vf->pf);
 894        struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 895        u16 vlan_id = 0;
 896        int err;
 897
 898        if (vf->port_vlan_info) {
 899                err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
 900                if (err) {
 901                        dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
 902                                vf->vf_id, err);
 903                        return err;
 904                }
 905
 906                vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
 907        }
 908
 909        /* vlan_id will either be 0 or the port VLAN number */
 910        err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
 911        if (err) {
 912                dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
 913                        vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
 914                        err);
 915                return err;
 916        }
 917
 918        return 0;
 919}
 920
 921/**
 922 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 923 * @vf: VF to add MAC filters for
 924 *
 925 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 926 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 927 */
 928static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
 929{
 930        struct device *dev = ice_pf_to_dev(vf->pf);
 931        struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 932        enum ice_status status;
 933        u8 broadcast[ETH_ALEN];
 934
 935        eth_broadcast_addr(broadcast);
 936        status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
 937        if (status) {
 938                dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
 939                        vf->vf_id, ice_stat_str(status));
 940                return ice_status_to_errno(status);
 941        }
 942
 943        vf->num_mac++;
 944
 945        if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
 946                status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
 947                                          ICE_FWD_TO_VSI);
 948                if (status) {
 949                        dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
 950                                &vf->dflt_lan_addr.addr[0], vf->vf_id,
 951                                ice_stat_str(status));
 952                        return ice_status_to_errno(status);
 953                }
 954                vf->num_mac++;
 955        }
 956
 957        return 0;
 958}
 959
 960/**
 961 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 962 * @vf: VF to configure trust setting for
 963 */
 964static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
 965{
 966        if (vf->trusted)
 967                set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 968        else
 969                clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 970}
 971
 972/**
 973 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 974 * @vf: VF to enable MSIX mappings for
 975 *
  976 * Some of the registers are indexed/configured using device-global (absolute)
  977 * values, while other registers take 0-based values that are relative to the
  978 * PF.
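 *
 * For instance, assuming hypothetically that msix_vector_first_id is 1 and
 * VF 0's first PF-based vector is 130 with 17 vectors, VPINT_ALLOC() would be
 * programmed with the device-based range 131-147, while the per-vector
 * GLINT_VECT2FUNC() writes use the PF-based indices 130-146.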
 979 */
 980static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
 981{
 982        int device_based_first_msix, device_based_last_msix;
 983        int pf_based_first_msix, pf_based_last_msix, v;
 984        struct ice_pf *pf = vf->pf;
 985        int device_based_vf_id;
 986        struct ice_hw *hw;
 987        u32 reg;
 988
 989        hw = &pf->hw;
 990        pf_based_first_msix = vf->first_vector_idx;
 991        pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
 992
 993        device_based_first_msix = pf_based_first_msix +
 994                pf->hw.func_caps.common_cap.msix_vector_first_id;
 995        device_based_last_msix =
 996                (device_based_first_msix + pf->num_msix_per_vf) - 1;
 997        device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 998
 999        reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
1000                VPINT_ALLOC_FIRST_M) |
1001               ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
1002                VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
1003        wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
1004
1005        reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
1006                 & VPINT_ALLOC_PCI_FIRST_M) |
1007               ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
1008                VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
1009        wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
1010
1011        /* map the interrupts to its functions */
1012        for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
1013                reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
1014                        GLINT_VECT2FUNC_VF_NUM_M) |
1015                       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
1016                        GLINT_VECT2FUNC_PF_NUM_M));
1017                wr32(hw, GLINT_VECT2FUNC(v), reg);
1018        }
1019
1020        /* Map mailbox interrupt to VF MSI-X vector 0 */
1021        wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
1022}
1023
1024/**
1025 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
1026 * @vf: VF to enable the mappings for
1027 * @max_txq: max Tx queues allowed on the VF's VSI
1028 * @max_rxq: max Rx queues allowed on the VF's VSI
1029 */
1030static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
1031{
1032        struct device *dev = ice_pf_to_dev(vf->pf);
1033        struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1034        struct ice_hw *hw = &vf->pf->hw;
1035        u32 reg;
1036
1037        /* set regardless of mapping mode */
1038        wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
1039
1040        /* VF Tx queues allocation */
1041        if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
1042                /* set the VF PF Tx queue range
1043                 * VFNUMQ value should be set to (number of queues - 1). A value
1044                 * of 0 means 1 queue and a value of 255 means 256 queues
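                 * e.g. (hypothetically) a first Tx queue of 64 with max_txq
                 * of 4 would program VFFIRSTQ = 64 and VFNUMQ = 3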
1045                 */
1046                reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
1047                        VPLAN_TX_QBASE_VFFIRSTQ_M) |
1048                       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
1049                        VPLAN_TX_QBASE_VFNUMQ_M));
1050                wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
1051        } else {
1052                dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
1053        }
1054
1055        /* set regardless of mapping mode */
1056        wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
1057
1058        /* VF Rx queues allocation */
1059        if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
1060                /* set the VF PF Rx queue range
1061                 * VFNUMQ value should be set to (number of queues - 1). A value
1062                 * of 0 means 1 queue and a value of 255 means 256 queues
1063                 */
1064                reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
1065                        VPLAN_RX_QBASE_VFFIRSTQ_M) |
1066                       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
1067                        VPLAN_RX_QBASE_VFNUMQ_M));
1068                wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
1069        } else {
1070                dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
1071        }
1072}
1073
1074/**
1075 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
1076 * @vf: pointer to the VF structure
1077 */
1078static void ice_ena_vf_mappings(struct ice_vf *vf)
1079{
1080        struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1081
1082        ice_ena_vf_msix_mappings(vf);
1083        ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
1084}
1085
1086/**
 1087 * ice_determine_res - determine how many resources can be assigned per VF
1088 * @pf: pointer to the PF structure
1089 * @avail_res: available resources in the PF structure
1090 * @max_res: maximum resources that can be given per VF
1091 * @min_res: minimum resources that can be given per VF
1092 *
 1093 * Returns a non-zero value if resources (queues/vectors) are available, or
 1094 * zero if the PF cannot accommodate all num_alloc_vfs.
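 *
 * For example, with hypothetical values num_alloc_vfs = 8, avail_res = 40,
 * max_res = 16 and min_res = 1: 8 * 16 = 128 and 8 * 8 = 64 both exceed 40,
 * but 8 * 4 = 32 fits, so 4 resources per VF would be returned.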
1095 */
1096static int
1097ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
1098{
1099        bool checked_min_res = false;
1100        int res;
1101
 1102        /* Start by checking if the PF can assign the maximum number of
 1103         * resources for all num_alloc_vfs.
 1104         * If yes, return that number per VF.
 1105         * If no, halve the count (rounding up) and check again.
 1106         * Repeat until even the minimum resources are unavailable, in which
 1107         * case return 0.
 1108         */
1109        res = max_res;
1110        while ((res >= min_res) && !checked_min_res) {
1111                int num_all_res;
1112
1113                num_all_res = pf->num_alloc_vfs * res;
1114                if (num_all_res <= avail_res)
1115                        return res;
1116
1117                if (res == min_res)
1118                        checked_min_res = true;
1119
1120                res = DIV_ROUND_UP(res, 2);
1121        }
1122        return 0;
1123}
1124
1125/**
1126 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
1127 * @vf: VF to calculate the register index for
1128 * @q_vector: a q_vector associated to the VF
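 *
 * For example, with hypothetical values sriov_base_vector = 96 and
 * num_msix_per_vf = 17, q_vector 0 of VF 2 maps to register index
 * 96 + 17 * 2 + 0 + 1 = 131, i.e. the first vector after that VF's OICR.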
1129 */
1130int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
1131{
1132        struct ice_pf *pf;
1133
1134        if (!vf || !q_vector)
1135                return -EINVAL;
1136
1137        pf = vf->pf;
1138
1139        /* always add one to account for the OICR being the first MSIX */
1140        return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
1141                q_vector->v_idx + 1;
1142}
1143
1144/**
1145 * ice_get_max_valid_res_idx - Get the max valid resource index
1146 * @res: pointer to the resource to find the max valid index for
1147 *
1148 * Start from the end of the ice_res_tracker and return right when we find the
1149 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
1150 * valid for SR-IOV because it is the only consumer that manipulates the
1151 * res->end and this is always called when res->end is set to res->num_entries.
1152 */
1153static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
1154{
1155        int i;
1156
1157        if (!res)
1158                return -EINVAL;
1159
1160        for (i = res->num_entries - 1; i >= 0; i--)
1161                if (res->list[i] & ICE_RES_VALID_BIT)
1162                        return i;
1163
1164        return 0;
1165}
1166
1167/**
1168 * ice_sriov_set_msix_res - Set any used MSIX resources
1169 * @pf: pointer to PF structure
1170 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
1171 *
1172 * This function allows SR-IOV resources to be taken from the end of the PF's
1173 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
1174 * just set the pf->sriov_base_vector and return success.
1175 *
1176 * If there are not enough resources available, return an error. This should
1177 * always be caught by ice_set_per_vf_res().
1178 *
1179 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
1180 * in the PF's space available for SR-IOV.
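 *
 * For example, assuming a hypothetical 1024 total MSI-X vectors, 64 entries
 * in the irq_tracker and 8 VFs needing 17 vectors each (136 in total), the
 * SR-IOV base vector would be set to 1024 - 136 = 888, safely above the
 * tracker range.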
1181 */
1182static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
1183{
1184        u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
1185        int vectors_used = pf->irq_tracker->num_entries;
1186        int sriov_base_vector;
1187
1188        sriov_base_vector = total_vectors - num_msix_needed;
1189
1190        /* make sure we only grab irq_tracker entries from the list end and
1191         * that we have enough available MSIX vectors
1192         */
1193        if (sriov_base_vector < vectors_used)
1194                return -EINVAL;
1195
1196        pf->sriov_base_vector = sriov_base_vector;
1197
1198        return 0;
1199}
1200
1201/**
1202 * ice_set_per_vf_res - check if vectors and queues are available
1203 * @pf: pointer to the PF structure
1204 *
1205 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
1206 * get more vectors and can enable more queues per VF. Note that this does not
1207 * grab any vectors from the SW pool already allocated. Also note, that all
1208 * vector counts include one for each VF's miscellaneous interrupt vector
1209 * (i.e. OICR).
1210 *
1211 * Minimum VFs - 2 vectors, 1 queue pair
1212 * Small VFs - 5 vectors, 4 queue pairs
1213 * Medium VFs - 17 vectors, 16 queue pairs
1214 *
1215 * Second, determine number of queue pairs per VF by starting with a pre-defined
1216 * maximum each VF supports. If this is not possible, then we adjust based on
1217 * queue pairs available on the device.
1218 *
1219 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
1220 * by each VF during VF initialization and reset.
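 *
 * As a hypothetical example, with 128 MSI-X vectors left over for SR-IOV and
 * 16 VFs requested, 8 vectors are available per VF, so each VF is given the
 * "small" allocation of 5 vectors; after reserving one non-queue (OICR)
 * vector, that leaves up to 4 queue pairs per VF, subject to the queues
 * actually available on the device.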
1221 */
1222static int ice_set_per_vf_res(struct ice_pf *pf)
1223{
1224        int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
1225        int msix_avail_per_vf, msix_avail_for_sriov;
1226        struct device *dev = ice_pf_to_dev(pf);
1227        u16 num_msix_per_vf, num_txq, num_rxq;
1228
1229        if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
1230                return -EINVAL;
1231
1232        /* determine MSI-X resources per VF */
1233        msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
1234                pf->irq_tracker->num_entries;
1235        msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
1236        if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
1237                num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
1238        } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
1239                num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
1240        } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
1241                num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
1242        } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
1243                num_msix_per_vf = ICE_MIN_INTR_PER_VF;
1244        } else {
1245                dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
1246                        msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
1247                        pf->num_alloc_vfs);
1248                return -EIO;
1249        }
1250
1251        /* determine queue resources per VF */
1252        num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
1253                                    min_t(u16,
1254                                          num_msix_per_vf - ICE_NONQ_VECS_VF,
1255                                          ICE_MAX_RSS_QS_PER_VF),
1256                                    ICE_MIN_QS_PER_VF);
1257
1258        num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
1259                                    min_t(u16,
1260                                          num_msix_per_vf - ICE_NONQ_VECS_VF,
1261                                          ICE_MAX_RSS_QS_PER_VF),
1262                                    ICE_MIN_QS_PER_VF);
1263
1264        if (!num_txq || !num_rxq) {
1265                dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
1266                        ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
1267                return -EIO;
1268        }
1269
1270        if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
1271                dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
1272                        pf->num_alloc_vfs);
1273                return -EINVAL;
1274        }
1275
1276        /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
1277        pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
1278        pf->num_msix_per_vf = num_msix_per_vf;
1279        dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
1280                 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
1281
1282        return 0;
1283}
1284
1285/**
1286 * ice_clear_vf_reset_trigger - enable VF to access hardware
 1287 * @vf: VF to enable hardware access for
1288 */
1289static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
1290{
1291        struct ice_hw *hw = &vf->pf->hw;
1292        u32 reg;
1293
1294        reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
1295        reg &= ~VPGEN_VFRTRIG_VFSWR_M;
1296        wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
1297        ice_flush(hw);
1298}
1299
1300/**
1301 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
1302 * @vf: pointer to the VF info
1303 * @vsi: the VSI being configured
1304 * @promisc_m: mask of promiscuous config bits
 1305 * @rm_promisc: true to clear the given promiscuous mode(s), false to set them
1306 *
1307 * This function configures VF VSI promiscuous mode, based on the VF requests,
1308 * for Unicast, Multicast and VLAN
1309 */
1310static enum ice_status
1311ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1312                       bool rm_promisc)
1313{
1314        struct ice_pf *pf = vf->pf;
1315        enum ice_status status = 0;
1316        struct ice_hw *hw;
1317
1318        hw = &pf->hw;
1319        if (vsi->num_vlan) {
1320                status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1321                                                  rm_promisc);
1322        } else if (vf->port_vlan_info) {
1323                if (rm_promisc)
1324                        status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1325                                                       vf->port_vlan_info);
1326                else
1327                        status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1328                                                     vf->port_vlan_info);
1329        } else {
1330                if (rm_promisc)
1331                        status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1332                                                       0);
1333                else
1334                        status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1335                                                     0);
1336        }
1337
1338        return status;
1339}
1340
1341static void ice_vf_clear_counters(struct ice_vf *vf)
1342{
1343        struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1344
1345        vf->num_mac = 0;
1346        vsi->num_vlan = 0;
1347        memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
1348        memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
1349}
1350
1351/**
1352 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
1353 * @vf: VF to perform pre VSI rebuild tasks
1354 *
1355 * These tasks are items that don't need to be amortized since they are most
1356 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
1357 */
1358static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
1359{
1360        ice_vf_clear_counters(vf);
1361        ice_clear_vf_reset_trigger(vf);
1362}
1363
1364/**
1365 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
1366 * @vsi: Pointer to VSI
1367 *
1368 * This function moves VSI into corresponding scheduler aggregator node
1369 * based on cached value of "aggregator node info" per VSI
1370 */
1371static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
1372{
1373        struct ice_pf *pf = vsi->back;
1374        enum ice_status status;
1375        struct device *dev;
1376
1377        if (!vsi->agg_node)
1378                return;
1379
1380        dev = ice_pf_to_dev(pf);
1381        if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
1382                dev_dbg(dev,
1383                        "agg_id %u already has reached max_num_vsis %u\n",
1384                        vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
1385                return;
1386        }
1387
1388        status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
1389                                     vsi->idx, vsi->tc_cfg.ena_tc);
1390        if (status)
1391                dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node\n",
1392                        vsi->idx, vsi->agg_node->agg_id);
1393        else
1394                vsi->agg_node->num_vsis++;
1395}
1396
1397/**
1398 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
1399 * @vf: VF to rebuild host configuration on
1400 */
1401static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
1402{
1403        struct device *dev = ice_pf_to_dev(vf->pf);
1404        struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1405
1406        ice_vf_set_host_trust_cfg(vf);
1407
1408        if (ice_vf_rebuild_host_mac_cfg(vf))
1409                dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
1410                        vf->vf_id);
1411
1412        if (ice_vf_rebuild_host_vlan_cfg(vf))
1413                dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
1414                        vf->vf_id);
1415        /* rebuild aggregator node config for main VF VSI */
1416        ice_vf_rebuild_aggregator_node_cfg(vsi);
1417}
1418
1419/**
1420 * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
1421 * @vf: VF to release and setup the VSI for
1422 *
1423 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
1424 * configuration change, etc.).
1425 */
1426static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
1427{
1428        ice_vf_vsi_release(vf);
1429        if (!ice_vf_vsi_setup(vf))
1430                return -ENOMEM;
1431
1432        return 0;
1433}
1434
1435/**
1436 * ice_vf_rebuild_vsi - rebuild the VF's VSI
1437 * @vf: VF to rebuild the VSI for
1438 *
1439 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
1440 * host, PFR, CORER, etc.).
1441 */
1442static int ice_vf_rebuild_vsi(struct ice_vf *vf)
1443{
1444        struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1445        struct ice_pf *pf = vf->pf;
1446
1447        if (ice_vsi_rebuild(vsi, true)) {
1448                dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
1449                        vf->vf_id);
1450                return -EIO;
1451        }
1452        /* vsi->idx will remain the same in this case so don't update
1453         * vf->lan_vsi_idx
1454         */
1455        vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
1456        vf->lan_vsi_num = vsi->vsi_num;
1457
1458        return 0;
1459}
1460
1461/**
1462 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
1463 * @vf: VF to set in initialized state
1464 *
1465 * After this function the VF will be ready to receive/handle the
1466 * VIRTCHNL_OP_GET_VF_RESOURCES message
1467 */
1468static void ice_vf_set_initialized(struct ice_vf *vf)
1469{
1470        ice_set_vf_state_qs_dis(vf);
1471        clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
1472        clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
1473        clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
1474        set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1475}
1476
1477/**
1478 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
1479 * @vf: VF to perform tasks on
1480 */
1481static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
1482{
1483        struct ice_pf *pf = vf->pf;
1484        struct ice_hw *hw;
1485
1486        hw = &pf->hw;
1487
1488        ice_vf_rebuild_host_cfg(vf);
1489
1490        ice_vf_set_initialized(vf);
1491        ice_ena_vf_mappings(vf);
1492        wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1493}
1494
1495/**
1496 * ice_reset_all_vfs - reset all allocated VFs in one go
1497 * @pf: pointer to the PF structure
1498 * @is_vflr: true if VFLR was issued, false if not
1499 *
1500 * First, tell the hardware to reset each VF, then do all the waiting in one
1501 * chunk, and finally finish restoring each VF after the wait. This is useful
1502 * during PF routines which need to reset all VFs, as otherwise it must perform
1503 * these resets in a serialized fashion.
1504 *
1505 * Returns true if any VFs were reset, and false otherwise.
1506 */
1507bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1508{
1509        struct device *dev = ice_pf_to_dev(pf);
1510        struct ice_hw *hw = &pf->hw;
1511        struct ice_vf *vf;
1512        int v, i;
1513
1514        /* If we don't have any VFs, then there is nothing to reset */
1515        if (!pf->num_alloc_vfs)
1516                return false;
1517
1518        /* clear all malicious info if the VFs are getting reset */
1519        ice_for_each_vf(pf, i)
1520                if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
1521                        dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
1522
1523        /* If VFs have been disabled, there is no need to reset */
1524        if (test_and_set_bit(ICE_VF_DIS, pf->state))
1525                return false;
1526
1527        /* Begin reset on all VFs at once */
1528        ice_for_each_vf(pf, v)
1529                ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1530
1531        /* HW requires some time to make sure it can flush the FIFO for a VF
1532         * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1533         * sequence to make sure that it has completed. We'll keep track of
1534         * the VFs using a simple iterator that increments once that VF has
1535         * finished resetting.
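             * The overall wait is bounded: at most 10 failed checks, each
             * sleeping 10-20 usec, so the loop gives up after roughly 200 usec
             * even if a VF never reports the reset as done.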
1536         */
1537        for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1538                /* Check each VF in sequence */
1539                while (v < pf->num_alloc_vfs) {
1540                        u32 reg;
1541
1542                        vf = &pf->vf[v];
1543                        reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1544                        if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1545                                /* only delay if the check failed */
1546                                usleep_range(10, 20);
1547                                break;
1548                        }
1549
1550                        /* If the current VF has finished resetting, move on
1551                         * to the next VF in sequence.
1552                         */
1553                        v++;
1554                }
1555        }
1556
1557        /* Display a warning if at least one VF didn't manage to reset in
1558         * time, but continue on with the operation.
1559         */
1560        if (v < pf->num_alloc_vfs)
1561                dev_warn(dev, "VF reset check timeout\n");
1562
1563        /* free VF resources to begin resetting the VSI state */
1564        ice_for_each_vf(pf, v) {
1565                vf = &pf->vf[v];
1566
1567                vf->driver_caps = 0;
1568                ice_vc_set_default_allowlist(vf);
1569
1570                ice_vf_fdir_exit(vf);
1571                /* clean VF control VSI when resetting VFs since it should be
1572                 * set up only when the VF creates its first FDIR rule.
1573                 */
1574                if (vf->ctrl_vsi_idx != ICE_NO_VSI)
1575                        ice_vf_ctrl_invalidate_vsi(vf);
1576
1577                ice_vf_pre_vsi_rebuild(vf);
1578                ice_vf_rebuild_vsi(vf);
1579                ice_vf_post_vsi_rebuild(vf);
1580        }
1581
1582        ice_flush(hw);
1583        clear_bit(ICE_VF_DIS, pf->state);
1584
1585        return true;
1586}
1587
1588/**
1589 * ice_is_vf_disabled
1590 * @vf: pointer to the VF info
1591 *
1592 * Returns true if the PF or VF is disabled, false otherwise.
1593 */
1594static bool ice_is_vf_disabled(struct ice_vf *vf)
1595{
1596        struct ice_pf *pf = vf->pf;
1597
1598        /* If the PF has been disabled, there is no need to reset the VF until
1599         * the PF is active again. Similarly, if the VF has been disabled, this
1600         * means something else is resetting the VF, so we shouldn't continue.
1601         * Otherwise, set disable VF state bit for actual reset, and continue.
1602         */
1603        return (test_bit(ICE_VF_DIS, pf->state) ||
1604                test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1605}
1606
1607/**
1608 * ice_reset_vf - Reset a particular VF
1609 * @vf: pointer to the VF structure
1610 * @is_vflr: true if VFLR was issued, false if not
1611 *
1612 * Returns true if the VF is currently in reset, resets successfully, or resets
1613 * are disabled and false otherwise.
1614 */
1615bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1616{
1617        struct ice_pf *pf = vf->pf;
1618        struct ice_vsi *vsi;
1619        struct device *dev;
1620        struct ice_hw *hw;
1621        bool rsd = false;
1622        u8 promisc_m;
1623        u32 reg;
1624        int i;
1625
1626        dev = ice_pf_to_dev(pf);
1627
1628        if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
1629                dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1630                        vf->vf_id);
1631                return true;
1632        }
1633
1634        if (ice_is_vf_disabled(vf)) {
1635                dev_dbg(dev, "VF %d is already disabled, there is no need to reset it; telling VM all is fine\n",
1636                        vf->vf_id);
1637                return true;
1638        }
1639
1640        /* Set VF disable bit state here, before triggering reset */
1641        set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1642        ice_trigger_vf_reset(vf, is_vflr, false);
1643
1644        vsi = ice_get_vf_vsi(vf);
1645
1646        if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1647                ice_dis_vf_qs(vf);
1648
1649        /* Call Disable LAN Tx queue AQ whether or not queues are
1650         * enabled. This is needed for successful completion of VFR.
1651         */
1652        ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1653                        NULL, ICE_VF_RESET, vf->vf_id, NULL);
1654
1655        hw = &pf->hw;
1656        /* poll VPGEN_VFRSTAT reg to make sure
1657         * that reset is complete
1658         */
1659        for (i = 0; i < 10; i++) {
1660                /* VF reset requires driver to first reset the VF and then
1661                 * poll the status register to make sure that the reset
1662                 * completed successfully.
1663                 */
1664                reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1665                if (reg & VPGEN_VFRSTAT_VFRD_M) {
1666                        rsd = true;
1667                        break;
1668                }
1669
1670                /* only sleep if the reset is not done */
1671                usleep_range(10, 20);
1672        }
1673
1674        vf->driver_caps = 0;
1675        ice_vc_set_default_allowlist(vf);
1676
1677        /* Display a warning if the VF didn't manage to reset in time, but
1678         * continue on with the operation.
1679         */
1680        if (!rsd)
1681                dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
1682
1683        /* disable promiscuous modes in case they were enabled
1684         * and ignore any error if the disabling process fails
1685         */
1686        if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1687            test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1688                if (vf->port_vlan_info || vsi->num_vlan)
1689                        promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1690                else
1691                        promisc_m = ICE_UCAST_PROMISC_BITS;
1692
1693                vsi = ice_get_vf_vsi(vf);
1694                if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1695                        dev_err(dev, "disabling promiscuous mode failed\n");
1696        }
1697
1698        ice_vf_fdir_exit(vf);
1699        /* clean VF control VSI when resetting the VF since it should be set up
1700         * only when the VF creates its first FDIR rule.
1701         */
1702        if (vf->ctrl_vsi_idx != ICE_NO_VSI)
1703                ice_vf_ctrl_vsi_release(vf);
1704
1705        ice_vf_pre_vsi_rebuild(vf);
1706
1707        if (ice_vf_rebuild_vsi_with_release(vf)) {
1708                dev_err(dev, "Failed to release and set up VF %u's VSI\n", vf->vf_id);
1709                return false;
1710        }
1711
1712        ice_vf_post_vsi_rebuild(vf);
1713
1714        /* if the VF has been reset allow it to come up again */
1715        if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
1716                dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", vf->vf_id);
1717
1718        return true;
1719}
1720
1721/**
1722 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1723 * @pf: pointer to the PF structure
1724 */
1725void ice_vc_notify_link_state(struct ice_pf *pf)
1726{
1727        int i;
1728
1729        ice_for_each_vf(pf, i)
1730                ice_vc_notify_vf_link_state(&pf->vf[i]);
1731}
1732
1733/**
1734 * ice_vc_notify_reset - Send pending reset message to all VFs
1735 * @pf: pointer to the PF structure
1736 *
1737 * indicate a pending reset to all VFs on a given PF
1738 */
1739void ice_vc_notify_reset(struct ice_pf *pf)
1740{
1741        struct virtchnl_pf_event pfe;
1742
1743        if (!pf->num_alloc_vfs)
1744                return;
1745
1746        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1747        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1748        ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1749                            (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1750}
1751
1752/**
1753 * ice_vc_notify_vf_reset - Notify VF of a reset event
1754 * @vf: pointer to the VF structure
1755 */
1756static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1757{
1758        struct virtchnl_pf_event pfe;
1759        struct ice_pf *pf;
1760
1761        if (!vf)
1762                return;
1763
1764        pf = vf->pf;
1765        if (ice_validate_vf_id(pf, vf->vf_id))
1766                return;
1767
1768        /* Bail out if VF is in disabled state, neither initialized, nor active
1769         * state - otherwise proceed with notifications
1770         */
1771        if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1772             !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1773            test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1774                return;
1775
1776        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1777        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1778        ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1779                              VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1780                              NULL);
1781}
1782
1783/**
1784 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
1785 * @vf: VF to initialize/setup the VSI for
1786 *
1787 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
1788 * the VF VSI's broadcast filter. It is only used during initial VF creation.
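     *
     * The VLAN 0 filter is what allows untagged (and priority tagged) traffic
     * to reach the VF before any VLANs are configured on it.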
1789 */
1790static int ice_init_vf_vsi_res(struct ice_vf *vf)
1791{
1792        struct ice_pf *pf = vf->pf;
1793        u8 broadcast[ETH_ALEN];
1794        enum ice_status status;
1795        struct ice_vsi *vsi;
1796        struct device *dev;
1797        int err;
1798
1799        vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
1800
1801        dev = ice_pf_to_dev(pf);
1802        vsi = ice_vf_vsi_setup(vf);
1803        if (!vsi)
1804                return -ENOMEM;
1805
1806        err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
1807        if (err) {
1808                dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1809                         vf->vf_id);
1810                goto release_vsi;
1811        }
1812
1813        eth_broadcast_addr(broadcast);
1814        status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1815        if (status) {
1816                dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
1817                        vf->vf_id, ice_stat_str(status));
1818                err = ice_status_to_errno(status);
1819                goto release_vsi;
1820        }
1821
1822        vf->num_mac = 1;
1823
1824        return 0;
1825
1826release_vsi:
1827        ice_vf_vsi_release(vf);
1828        return err;
1829}
1830
1831/**
1832 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
1833 * @pf: PF the VFs are associated with
1834 */
1835static int ice_start_vfs(struct ice_pf *pf)
1836{
1837        struct ice_hw *hw = &pf->hw;
1838        int retval, i;
1839
1840        ice_for_each_vf(pf, i) {
1841                struct ice_vf *vf = &pf->vf[i];
1842
1843                ice_clear_vf_reset_trigger(vf);
1844
1845                retval = ice_init_vf_vsi_res(vf);
1846                if (retval) {
1847                        dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
1848                                vf->vf_id, retval);
1849                        goto teardown;
1850                }
1851
1852                set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1853                ice_ena_vf_mappings(vf);
1854                wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1855        }
1856
1857        ice_flush(hw);
1858        return 0;
1859
1860teardown:
1861        for (i = i - 1; i >= 0; i--) {
1862                struct ice_vf *vf = &pf->vf[i];
1863
1864                ice_dis_vf_mappings(vf);
1865                ice_vf_vsi_release(vf);
1866        }
1867
1868        return retval;
1869}
1870
1871/**
1872 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
1873 * @pf: PF holding reference to all VFs for default configuration
1874 */
1875static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1876{
1877        int i;
1878
1879        ice_for_each_vf(pf, i) {
1880                struct ice_vf *vf = &pf->vf[i];
1881
1882                vf->pf = pf;
1883                vf->vf_id = i;
1884                vf->vf_sw_id = pf->first_sw;
1885                /* assign default capabilities */
1886                set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1887                vf->spoofchk = true;
1888                vf->num_vf_qs = pf->num_qps_per_vf;
1889                ice_vc_set_default_allowlist(vf);
1890
1891                /* ctrl_vsi_idx will be set to a valid value only when VF
1892                 * creates its first fdir rule.
1893                 */
1894                ice_vf_ctrl_invalidate_vsi(vf);
1895                ice_vf_fdir_init(vf);
1896        }
1897}
1898
1899/**
1900 * ice_alloc_vfs - allocate num_vfs in the PF structure
1901 * @pf: PF to store the allocated VFs in
1902 * @num_vfs: number of VFs to allocate
1903 */
1904static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1905{
1906        struct ice_vf *vfs;
1907
1908        vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1909                           GFP_KERNEL);
1910        if (!vfs)
1911                return -ENOMEM;
1912
1913        pf->vf = vfs;
1914        pf->num_alloc_vfs = num_vfs;
1915
1916        return 0;
1917}
1918
1919/**
1920 * ice_ena_vfs - enable VFs so they are ready to be used
1921 * @pf: pointer to the PF structure
1922 * @num_vfs: number of VFs to enable
1923 */
1924static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1925{
1926        struct device *dev = ice_pf_to_dev(pf);
1927        struct ice_hw *hw = &pf->hw;
1928        int ret;
1929
1930        /* Disable global interrupt 0 so we don't try to handle the VFLR. */
1931        wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1932             ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1933        set_bit(ICE_OICR_INTR_DIS, pf->state);
1934        ice_flush(hw);
1935
1936        ret = pci_enable_sriov(pf->pdev, num_vfs);
1937        if (ret) {
1938                pf->num_alloc_vfs = 0;
1939                goto err_unroll_intr;
1940        }
1941
1942        ret = ice_alloc_vfs(pf, num_vfs);
1943        if (ret)
1944                goto err_pci_disable_sriov;
1945
1946        if (ice_set_per_vf_res(pf)) {
1947                dev_err(dev, "Not enough resources for %d VFs, try with fewer VFs\n",
1948                        num_vfs);
1949                ret = -ENOSPC;
1950                goto err_unroll_sriov;
1951        }
1952
1953        ice_set_dflt_settings_vfs(pf);
1954
1955        if (ice_start_vfs(pf)) {
1956                dev_err(dev, "Failed to start VF(s)\n");
1957                ret = -EAGAIN;
1958                goto err_unroll_sriov;
1959        }
1960
1961        clear_bit(ICE_VF_DIS, pf->state);
1962        return 0;
1963
1964err_unroll_sriov:
1965        devm_kfree(dev, pf->vf);
1966        pf->vf = NULL;
1967        pf->num_alloc_vfs = 0;
1968err_pci_disable_sriov:
1969        pci_disable_sriov(pf->pdev);
1970err_unroll_intr:
1971        /* rearm interrupts here */
1972        ice_irq_dynamic_ena(hw, NULL, NULL);
1973        clear_bit(ICE_OICR_INTR_DIS, pf->state);
1974        return ret;
1975}
1976
1977/**
1978 * ice_pci_sriov_ena - Enable or change number of VFs
1979 * @pf: pointer to the PF structure
1980 * @num_vfs: number of VFs to allocate
1981 *
1982 * Returns 0 on success and negative on failure
1983 */
1984static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1985{
1986        int pre_existing_vfs = pci_num_vf(pf->pdev);
1987        struct device *dev = ice_pf_to_dev(pf);
1988        int err;
1989
1990        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1991                ice_free_vfs(pf);
1992        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1993                return 0;
1994
1995        if (num_vfs > pf->num_vfs_supported) {
1996                dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1997                        num_vfs, pf->num_vfs_supported);
1998                return -EOPNOTSUPP;
1999        }
2000
2001        dev_info(dev, "Enabling %d VFs\n", num_vfs);
2002        err = ice_ena_vfs(pf, num_vfs);
2003        if (err) {
2004                dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
2005                return err;
2006        }
2007
2008        set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
2009        return 0;
2010}
2011
2012/**
2013 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
2014 * @pf: PF to enable SR-IOV on
2015 */
2016static int ice_check_sriov_allowed(struct ice_pf *pf)
2017{
2018        struct device *dev = ice_pf_to_dev(pf);
2019
2020        if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
2021                dev_err(dev, "This device is not capable of SR-IOV\n");
2022                return -EOPNOTSUPP;
2023        }
2024
2025        if (ice_is_safe_mode(pf)) {
2026                dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
2027                return -EOPNOTSUPP;
2028        }
2029
2030        if (!ice_pf_state_is_nominal(pf)) {
2031                dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
2032                return -EBUSY;
2033        }
2034
2035        return 0;
2036}
2037
2038/**
2039 * ice_sriov_configure - Enable or change number of VFs via sysfs
2040 * @pdev: pointer to a pci_dev structure
2041 * @num_vfs: number of VFs to allocate or 0 to free VFs
2042 *
2043 * This function is called when the user updates the number of VFs in sysfs. On
2044 * success return whatever num_vfs was set to by the caller. Return negative on
2045 * failure.
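     *
     * Typically reached via the standard SR-IOV sysfs attribute, e.g.
     * (interface name here is only illustrative):
     *   echo 8 > /sys/class/net/<pf-netdev>/device/sriov_numvfs
     *   echo 0 > /sys/class/net/<pf-netdev>/device/sriov_numvfs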
2046 */
2047int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
2048{
2049        struct ice_pf *pf = pci_get_drvdata(pdev);
2050        struct device *dev = ice_pf_to_dev(pf);
2051        enum ice_status status;
2052        int err;
2053
2054        err = ice_check_sriov_allowed(pf);
2055        if (err)
2056                return err;
2057
2058        if (!num_vfs) {
2059                if (!pci_vfs_assigned(pdev)) {
2060                        ice_mbx_deinit_snapshot(&pf->hw);
2061                        ice_free_vfs(pf);
2062                        if (pf->lag)
2063                                ice_enable_lag(pf->lag);
2064                        return 0;
2065                }
2066
2067                dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
2068                return -EBUSY;
2069        }
2070
2071        status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
2072        if (status)
2073                return ice_status_to_errno(status);
2074
2075        err = ice_pci_sriov_ena(pf, num_vfs);
2076        if (err) {
2077                ice_mbx_deinit_snapshot(&pf->hw);
2078                return err;
2079        }
2080
2081        if (pf->lag)
2082                ice_disable_lag(pf->lag);
2083        return num_vfs;
2084}
2085
2086/**
2087 * ice_process_vflr_event - Free VF resources via IRQ calls
2088 * @pf: pointer to the PF structure
2089 *
2090 * called from the VFLR IRQ handler to
2091 * free up VF resources and state variables
2092 */
2093void ice_process_vflr_event(struct ice_pf *pf)
2094{
2095        struct ice_hw *hw = &pf->hw;
2096        unsigned int vf_id;
2097        u32 reg;
2098
2099        if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2100            !pf->num_alloc_vfs)
2101                return;
2102
2103        ice_for_each_vf(pf, vf_id) {
2104                struct ice_vf *vf = &pf->vf[vf_id];
2105                u32 reg_idx, bit_idx;
2106
2107                reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
2108                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
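                    /* e.g. with vf_base_id 0, VF 40 maps to GLGEN_VFLRSTAT(1), bit 8 */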
2109                /* read GLGEN_VFLRSTAT register to find out the flr VFs */
2110                reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
2111                if (reg & BIT(bit_idx))
2112                        /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
2113                        ice_reset_vf(vf, true);
2114        }
2115}
2116
2117/**
2118 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
2119 * @vf: pointer to the VF info
2120 */
2121static void ice_vc_reset_vf(struct ice_vf *vf)
2122{
2123        ice_vc_notify_vf_reset(vf);
2124        ice_reset_vf(vf, false);
2125}
2126
2127/**
2128 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
2129 * @pf: PF used to index all VFs
2130 * @pfq: queue index relative to the PF's function space
2131 *
2132 * If no VF is found who owns the pfq then return NULL, otherwise return a
2133 * pointer to the VF who owns the pfq
2134 */
2135static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
2136{
2137        unsigned int vf_id;
2138
2139        ice_for_each_vf(pf, vf_id) {
2140                struct ice_vf *vf = &pf->vf[vf_id];
2141                struct ice_vsi *vsi;
2142                u16 rxq_idx;
2143
2144                vsi = ice_get_vf_vsi(vf);
2145
2146                ice_for_each_rxq(vsi, rxq_idx)
2147                        if (vsi->rxq_map[rxq_idx] == pfq)
2148                                return vf;
2149        }
2150
2151        return NULL;
2152}
2153
2154/**
2155 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
2156 * @pf: PF used for conversion
2157 * @globalq: global queue index used to convert to PF space queue index
2158 */
2159static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
2160{
2161        return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
2162}
2163
2164/**
2165 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
2166 * @pf: PF that the LAN overflow event happened on
2167 * @event: structure holding the event information for the LAN overflow event
2168 *
2169 * Determine if the LAN overflow event was caused by a VF queue. If it was not
2170 * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger
2171 * a reset on the offending VF.
2172 */
2173void
2174ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
2175{
2176        u32 gldcb_rtctq, queue;
2177        struct ice_vf *vf;
2178
2179        gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
2180        dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
2181
2182        /* event returns device global Rx queue number */
2183        queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
2184                GLDCB_RTCTQ_RXQNUM_S;
2185
2186        vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
2187        if (!vf)
2188                return;
2189
2190        ice_vc_reset_vf(vf);
2191}
2192
2193/**
2194 * ice_vc_send_msg_to_vf - Send message to VF
2195 * @vf: pointer to the VF info
2196 * @v_opcode: virtual channel opcode
2197 * @v_retval: virtual channel return value
2198 * @msg: pointer to the msg buffer
2199 * @msglen: msg length
2200 *
2201 * send msg to VF
2202 */
2203int
2204ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
2205                      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
2206{
2207        enum ice_status aq_ret;
2208        struct device *dev;
2209        struct ice_pf *pf;
2210
2211        if (!vf)
2212                return -EINVAL;
2213
2214        pf = vf->pf;
2215        if (ice_validate_vf_id(pf, vf->vf_id))
2216                return -EINVAL;
2217
2218        dev = ice_pf_to_dev(pf);
2219
2220        /* single place to detect unsuccessful return values */
2221        if (v_retval) {
2222                vf->num_inval_msgs++;
2223                dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
2224                         v_opcode, v_retval);
2225                if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
2226                        dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
2227                                vf->vf_id);
2228                        dev_err(dev, "Use PF Control I/F to enable the VF\n");
2229                        set_bit(ICE_VF_STATE_DIS, vf->vf_states);
2230                        return -EIO;
2231                }
2232        } else {
2233                vf->num_valid_msgs++;
2234                /* reset the invalid counter if a valid message is received */
2235                vf->num_inval_msgs = 0;
2236        }
2237
2238        aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
2239                                       msg, msglen, NULL);
2240        if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
2241                dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
2242                         vf->vf_id, ice_stat_str(aq_ret),
2243                         ice_aq_str(pf->hw.mailboxq.sq_last_status));
2244                return -EIO;
2245        }
2246
2247        return 0;
2248}
2249
2250/**
2251 * ice_vc_get_ver_msg
2252 * @vf: pointer to the VF info
2253 * @msg: pointer to the msg buffer
2254 *
2255 * called from the VF to request the API version used by the PF
2256 */
2257static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
2258{
2259        struct virtchnl_version_info info = {
2260                VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2261        };
2262
2263        vf->vf_ver = *(struct virtchnl_version_info *)msg;
2264        /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2265        if (VF_IS_V10(&vf->vf_ver))
2266                info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2267
2268        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2269                                     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
2270                                     sizeof(struct virtchnl_version_info));
2271}
2272
2273/**
2274 * ice_vc_get_max_frame_size - get max frame size allowed for VF
2275 * @vf: VF used to determine max frame size
2276 *
2277 * Max frame size is determined based on the current port's max frame size and
2278 * whether a port VLAN is configured on this VF. The VF is not aware whether
2279 * it's in a port VLAN, so the PF needs to account for this both when checking
2280 * the max frame size and when reporting it to the VF.
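     *
     * For example, assuming a 9728 byte port max frame size and a port VLAN on
     * the VF, the VF would be reported a max frame size of 9724 (VLAN_HLEN
     * less).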
2281 */
2282static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
2283{
2284        struct ice_port_info *pi = ice_vf_get_port_info(vf);
2285        u16 max_frame_size;
2286
2287        max_frame_size = pi->phy.link_info.max_frame_size;
2288
2289        if (vf->port_vlan_info)
2290                max_frame_size -= VLAN_HLEN;
2291
2292        return max_frame_size;
2293}
2294
2295/**
2296 * ice_vc_get_vf_res_msg
2297 * @vf: pointer to the VF info
2298 * @msg: pointer to the msg buffer
2299 *
2300 * called from the VF to request its resources
2301 */
2302static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
2303{
2304        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2305        struct virtchnl_vf_resource *vfres = NULL;
2306        struct ice_pf *pf = vf->pf;
2307        struct ice_vsi *vsi;
2308        int len = 0;
2309        int ret;
2310
2311        if (ice_check_vf_init(pf, vf)) {
2312                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2313                goto err;
2314        }
2315
2316        len = sizeof(struct virtchnl_vf_resource);
2317
2318        vfres = kzalloc(len, GFP_KERNEL);
2319        if (!vfres) {
2320                v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2321                len = 0;
2322                goto err;
2323        }
2324        if (VF_IS_V11(&vf->vf_ver))
2325                vf->driver_caps = *(u32 *)msg;
2326        else
2327                vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2328                                  VIRTCHNL_VF_OFFLOAD_RSS_REG |
2329                                  VIRTCHNL_VF_OFFLOAD_VLAN;
2330
2331        vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2332        vsi = ice_get_vf_vsi(vf);
2333        if (!vsi) {
2334                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2335                goto err;
2336        }
2337
2338        if (!vsi->info.pvid)
2339                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2340
2341        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2342                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2343        } else {
2344                if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
2345                        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2346                else
2347                        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2348        }
2349
2350        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
2351                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
2352
2353        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2354                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2355
2356        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2357                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2358
2359        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
2360                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2361
2362        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
2363                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2364
2365        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2366                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2367
2368        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2369                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2370
2371        if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
2372                vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2373
2374        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
2375                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
2376
2377        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
2378                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
2379
2380        vfres->num_vsis = 1;
2381        /* Tx and Rx queues are equal for VF */
2382        vfres->num_queue_pairs = vsi->num_txq;
2383        vfres->max_vectors = pf->num_msix_per_vf;
2384        vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
2385        vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
2386        vfres->max_mtu = ice_vc_get_max_frame_size(vf);
2387
2388        vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
2389        vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2390        vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
2391        ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2392                        vf->dflt_lan_addr.addr);
2393
2394        /* match guest capabilities */
2395        vf->driver_caps = vfres->vf_cap_flags;
2396
2397        ice_vc_set_caps_allowlist(vf);
2398        ice_vc_set_working_allowlist(vf);
2399
2400        set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
2401
2402err:
2403        /* send the response back to the VF */
2404        ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
2405                                    (u8 *)vfres, len);
2406
2407        kfree(vfres);
2408        return ret;
2409}
2410
2411/**
2412 * ice_vc_reset_vf_msg
2413 * @vf: pointer to the VF info
2414 *
2415 * called from the VF to reset itself; unlike other virtchnl
2416 * messages, the PF driver doesn't send a response back to
2417 * the VF
2418 */
2419static void ice_vc_reset_vf_msg(struct ice_vf *vf)
2420{
2421        if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2422                ice_reset_vf(vf, false);
2423}
2424
2425/**
2426 * ice_find_vsi_from_id
2427 * @pf: the PF structure to search for the VSI
2428 * @id: ID of the VSI it is searching for
2429 *
2430 * searches for the VSI with the given ID
2431 */
2432static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2433{
2434        int i;
2435
2436        ice_for_each_vsi(pf, i)
2437                if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2438                        return pf->vsi[i];
2439
2440        return NULL;
2441}
2442
2443/**
2444 * ice_vc_isvalid_vsi_id
2445 * @vf: pointer to the VF info
2446 * @vsi_id: VF relative VSI ID
2447 *
2448 * check for the valid VSI ID
2449 */
2450bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2451{
2452        struct ice_pf *pf = vf->pf;
2453        struct ice_vsi *vsi;
2454
2455        vsi = ice_find_vsi_from_id(pf, vsi_id);
2456
2457        return (vsi && (vsi->vf_id == vf->vf_id));
2458}
2459
2460/**
2461 * ice_vc_isvalid_q_id
2462 * @vf: pointer to the VF info
2463 * @vsi_id: VSI ID
2464 * @qid: VSI relative queue ID
2465 *
2466 * check for the valid queue ID
2467 */
2468static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2469{
2470        struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2471        /* allocated Tx and Rx queues should always be equal for a VF VSI */
2472        return (vsi && (qid < vsi->alloc_txq));
2473}
2474
2475/**
2476 * ice_vc_isvalid_ring_len
2477 * @ring_len: length of ring
2478 *
2479 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
2480 * or zero
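     * (e.g. assuming the driver's usual ICE_REQ_DESC_MULTIPLE of 32 and a
     * 64-8160 descriptor range, a request for 512 descriptors passes while a
     * request for 500 is rejected)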
2481 */
2482static bool ice_vc_isvalid_ring_len(u16 ring_len)
2483{
2484        return ring_len == 0 ||
2485               (ring_len >= ICE_MIN_NUM_DESC &&
2486                ring_len <= ICE_MAX_NUM_DESC &&
2487                !(ring_len % ICE_REQ_DESC_MULTIPLE));
2488}
2489
2490/**
2491 * ice_vc_parse_rss_cfg - parses hash fields and headers from
2492 * a specific virtchnl RSS cfg
2493 * @hw: pointer to the hardware
2494 * @rss_cfg: pointer to the virtchnl RSS cfg
2495 * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
2496 * to configure
2497 * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
2498 *
2499 * Return true if all the protocol header and hash fields in the RSS cfg could
2500 * be parsed, else return false
2501 *
2502 * This function parses the virtchnl RSS cfg to be the intended
2503 * hash fields and the intended header for RSS configuration
2504 */
2505static bool
2506ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
2507                     u32 *addl_hdrs, u64 *hash_flds)
2508{
2509        const struct ice_vc_hash_field_match_type *hf_list;
2510        const struct ice_vc_hdr_match_type *hdr_list;
2511        int i, hf_list_len, hdr_list_len;
2512
2513        if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
2514                     sizeof(hw->active_pkg_name))) {
2515                hf_list = ice_vc_hash_field_list_comms;
2516                hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms);
2517                hdr_list = ice_vc_hdr_list_comms;
2518                hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms);
2519        } else {
2520                hf_list = ice_vc_hash_field_list_os;
2521                hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os);
2522                hdr_list = ice_vc_hdr_list_os;
2523                hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os);
2524        }
2525
2526        for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
2527                struct virtchnl_proto_hdr *proto_hdr =
2528                                        &rss_cfg->proto_hdrs.proto_hdr[i];
2529                bool hdr_found = false;
2530                int j;
2531
2532                /* Find matched ice headers according to virtchnl headers. */
2533                for (j = 0; j < hdr_list_len; j++) {
2534                        struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
2535
2536                        if (proto_hdr->type == hdr_map.vc_hdr) {
2537                                *addl_hdrs |= hdr_map.ice_hdr;
2538                                hdr_found = true;
2539                        }
2540                }
2541
2542                if (!hdr_found)
2543                        return false;
2544
2545                /* Find matched ice hash fields according to
2546                 * virtchnl hash fields.
2547                 */
2548                for (j = 0; j < hf_list_len; j++) {
2549                        struct ice_vc_hash_field_match_type hf_map = hf_list[j];
2550
2551                        if (proto_hdr->type == hf_map.vc_hdr &&
2552                            proto_hdr->field_selector == hf_map.vc_hash_field) {
2553                                *hash_flds |= hf_map.ice_hash_field;
2554                                break;
2555                        }
2556                }
2557        }
2558
2559        return true;
2560}
2561
2562/**
2563 * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
2564 * RSS offloads
2565 * @caps: VF driver negotiated capabilities
2566 *
2567 * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
2568 * else return false
2569 */
2570static bool ice_vf_adv_rss_offload_ena(u32 caps)
2571{
2572        return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
2573}
2574
2575/**
2576 * ice_vc_handle_rss_cfg
2577 * @vf: pointer to the VF info
2578 * @msg: pointer to the message buffer
2579 * @add: add a RSS config if true, otherwise delete a RSS config
2580 *
2581 * This function adds/deletes a RSS config
2582 */
2583static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
2584{
2585        u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
2586        struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
2587        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2588        struct device *dev = ice_pf_to_dev(vf->pf);
2589        struct ice_hw *hw = &vf->pf->hw;
2590        struct ice_vsi *vsi;
2591
2592        if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2593                dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
2594                        vf->vf_id);
2595                v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
2596                goto error_param;
2597        }
2598
2599        if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
2600                dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
2601                        vf->vf_id);
2602                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2603                goto error_param;
2604        }
2605
2606        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2607                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2608                goto error_param;
2609        }
2610
2611        if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
2612            rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
2613            rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
2614                dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
2615                        vf->vf_id);
2616                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2617                goto error_param;
2618        }
2619
2620        vsi = ice_get_vf_vsi(vf);
2621        if (!vsi) {
2622                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2623                goto error_param;
2624        }
2625
2626        if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
2627                struct ice_vsi_ctx *ctx;
2628                enum ice_status status;
2629                u8 lut_type, hash_type;
2630
2631                lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
2632                hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
2633                                ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
2634
2635                ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2636                if (!ctx) {
2637                        v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2638                        goto error_param;
2639                }
2640
2641                ctx->info.q_opt_rss = ((lut_type <<
2642                                        ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
2643                                       ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
2644                                       (hash_type &
2645                                        ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
2646
2647                /* Preserve existing queueing option setting */
2648                ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
2649                                          ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
2650                ctx->info.q_opt_tc = vsi->info.q_opt_tc;
2651                ctx->info.q_opt_flags = vsi->info.q_opt_rss;
2652
2653                ctx->info.valid_sections =
2654                                cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
2655
2656                status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
2657                if (status) {
2658                        dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n",
2659                                ice_stat_str(status),
2660                                ice_aq_str(hw->adminq.sq_last_status));
2661                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2662                } else {
2663                        vsi->info.q_opt_rss = ctx->info.q_opt_rss;
2664                }
2665
2666                kfree(ctx);
2667        } else {
2668                u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
2669                u64 hash_flds = ICE_HASH_INVALID;
2670
2671                if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
2672                                          &hash_flds)) {
2673                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2674                        goto error_param;
2675                }
2676
2677                if (add) {
2678                        if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
2679                                            addl_hdrs)) {
2680                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2681                                dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
2682                                        vsi->vsi_num, v_ret);
2683                        }
2684                } else {
2685                        enum ice_status status;
2686
2687                        status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
2688                                                 addl_hdrs);
2689                        /* We just ignore ICE_ERR_DOES_NOT_EXIST, because
2690                         * if two configurations share the same profile, removing
2691                         * one of them actually removes both, since the
2692                         * profile is deleted.
2693                         */
2694                        if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2695                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2696                                dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n",
2697                                        vf->vf_id, ice_stat_str(status));
2698                        }
2699                }
2700        }
2701
2702error_param:
2703        return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
2704}
2705
2706/**
2707 * ice_vc_config_rss_key
2708 * @vf: pointer to the VF info
2709 * @msg: pointer to the msg buffer
2710 *
2711 * Configure the VF's RSS key
2712 */
2713static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2714{
2715        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2716        struct virtchnl_rss_key *vrk =
2717                (struct virtchnl_rss_key *)msg;
2718        struct ice_vsi *vsi;
2719
2720        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2721                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2722                goto error_param;
2723        }
2724
2725        if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2726                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2727                goto error_param;
2728        }
2729
2730        if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2731                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2732                goto error_param;
2733        }
2734
2735        if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2736                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2737                goto error_param;
2738        }
2739
2740        vsi = ice_get_vf_vsi(vf);
2741        if (!vsi) {
2742                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2743                goto error_param;
2744        }
2745
2746        if (ice_set_rss_key(vsi, vrk->key))
2747                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2748error_param:
2749        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2750                                     NULL, 0);
2751}
2752
2753/**
2754 * ice_vc_config_rss_lut
2755 * @vf: pointer to the VF info
2756 * @msg: pointer to the msg buffer
2757 *
2758 * Configure the VF's RSS LUT
2759 */
2760static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2761{
2762        struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2763        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2764        struct ice_vsi *vsi;
2765
2766        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2767                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2768                goto error_param;
2769        }
2770
2771        if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2772                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2773                goto error_param;
2774        }
2775
2776        if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2777                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2778                goto error_param;
2779        }
2780
2781        if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2782                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2783                goto error_param;
2784        }
2785
2786        vsi = ice_get_vf_vsi(vf);
2787        if (!vsi) {
2788                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2789                goto error_param;
2790        }
2791
2792        if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2793                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2794error_param:
2795        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2796                                     NULL, 0);
2797}
2798
2799/**
2800 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2801 * @vf: the VF being reset
2802 *
2803 * The max poll time is about ~800ms, which is about the maximum time it takes
2804 * for a VF to be reset and/or a VF driver to be removed.
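     * The bound comes from ICE_MAX_VF_RESET_TRIES polls of
     * ICE_MAX_VF_RESET_SLEEP_MS each, as done below.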
2805 */
2806static void ice_wait_on_vf_reset(struct ice_vf *vf)
2807{
2808        int i;
2809
2810        for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2811                if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2812                        break;
2813                msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2814        }
2815}
2816
2817/**
2818 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2819 * @vf: VF to check if it's ready to be configured/queried
2820 *
2821 * The purpose of this function is to make sure the VF is not in reset, not
2822 * disabled, and initialized so it can be configured and/or queried by a host
2823 * administrator.
2824 */
2825static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2826{
2827        struct ice_pf *pf;
2828
2829        ice_wait_on_vf_reset(vf);
2830
2831        if (ice_is_vf_disabled(vf))
2832                return -EINVAL;
2833
2834        pf = vf->pf;
2835        if (ice_check_vf_init(pf, vf))
2836                return -EBUSY;
2837
2838        return 0;
2839}
2840
2841/**
2842 * ice_set_vf_spoofchk
2843 * @netdev: network interface device structure
2844 * @vf_id: VF identifier
2845 * @ena: flag to enable or disable feature
2846 *
2847 * Enable or disable VF spoof checking
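     *
     * Reached through the .ndo_set_vf_spoofchk callback, typically via
     * iproute2 on the host, e.g. (interface name is only illustrative):
     *   ip link set dev <pf-netdev> vf 0 spoofchk on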
2848 */
2849int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2850{
2851        struct ice_netdev_priv *np = netdev_priv(netdev);
2852        struct ice_pf *pf = np->vsi->back;
2853        struct ice_vsi_ctx *ctx;
2854        struct ice_vsi *vf_vsi;
2855        enum ice_status status;
2856        struct device *dev;
2857        struct ice_vf *vf;
2858        int ret;
2859
2860        dev = ice_pf_to_dev(pf);
2861        if (ice_validate_vf_id(pf, vf_id))
2862                return -EINVAL;
2863
2864        vf = &pf->vf[vf_id];
2865        ret = ice_check_vf_ready_for_cfg(vf);
2866        if (ret)
2867                return ret;
2868
2869        vf_vsi = ice_get_vf_vsi(vf);
2870        if (!vf_vsi) {
2871                netdev_err(netdev, "VSI %d for VF %d is null\n",
2872                           vf->lan_vsi_idx, vf->vf_id);
2873                return -EINVAL;
2874        }
2875
2876        if (vf_vsi->type != ICE_VSI_VF) {
2877                netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
2878                           vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2879                return -ENODEV;
2880        }
2881
2882        if (ena == vf->spoofchk) {
2883                dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2884                return 0;
2885        }
2886
2887        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2888        if (!ctx)
2889                return -ENOMEM;
2890
2891        ctx->info.sec_flags = vf_vsi->info.sec_flags;
2892        ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
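        /* Only the security section of the VSI context is updated here:
         * enabling spoof checking sets the MAC anti-spoof flag together with
         * Tx VLAN pruning (shifted into the Tx prune-enable field), and
         * disabling it clears the same bits; valid_sections tells firmware
         * which part of the context to apply.
         */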
2893        if (ena) {
2894                ctx->info.sec_flags |=
2895                        ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2896                        (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2897                         ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2898        } else {
2899                ctx->info.sec_flags &=
2900                        ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2901                          (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2902                           ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2903        }
2904
2905        status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2906        if (status) {
2907                dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
2908                        ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2909                        ice_stat_str(status));
2910                ret = -EIO;
2911                goto out;
2912        }
2913
2914        /* only update spoofchk state and VSI context on success */
2915        vf_vsi->info.sec_flags = ctx->info.sec_flags;
2916        vf->spoofchk = ena;
2917
2918out:
2919        kfree(ctx);
2920        return ret;
2921}
2922
2923/**
2924 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2925 * @pf: PF structure for accessing VF(s)
2926 *
2927 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2928 * else return true
2929 */
2930bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2931{
2932        int vf_idx;
2933
2934        ice_for_each_vf(pf, vf_idx) {
2935                struct ice_vf *vf = &pf->vf[vf_idx];
2936
2937                /* found a VF that has promiscuous mode configured */
2938                if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2939                    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2940                        return true;
2941        }
2942
2943        return false;
2944}
2945
2946/**
2947 * ice_vc_cfg_promiscuous_mode_msg
2948 * @vf: pointer to the VF info
2949 * @msg: pointer to the msg buffer
2950 *
2951 * called from the VF to configure VF VSIs promiscuous mode
2952 */
2953static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2954{
2955        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2956        bool rm_promisc, alluni = false, allmulti = false;
2957        struct virtchnl_promisc_info *info =
2958            (struct virtchnl_promisc_info *)msg;
2959        struct ice_pf *pf = vf->pf;
2960        struct ice_vsi *vsi;
2961        struct device *dev;
2962        int ret = 0;
2963
2964        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2965                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2966                goto error_param;
2967        }
2968
2969        if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2970                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2971                goto error_param;
2972        }
2973
2974        vsi = ice_get_vf_vsi(vf);
2975        if (!vsi) {
2976                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2977                goto error_param;
2978        }
2979
2980        dev = ice_pf_to_dev(pf);
2981        if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2982                dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2983                        vf->vf_id);
2984                /* Leave v_ret alone, lie to the VF on purpose. */
2985                goto error_param;
2986        }
2987
2988        if (info->flags & FLAG_VF_UNICAST_PROMISC)
2989                alluni = true;
2990
2991        if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2992                allmulti = true;
2993
2994        rm_promisc = !allmulti && !alluni;
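        /* rm_promisc is true only when the VF asked to clear both unicast and
         * multicast promiscuous mode; in that case spoof checking is switched
         * back on and the VLAN pruning configuration is refreshed below
         * (both only matter when the VSI carries VLANs or a port VLAN).
         */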
2995
2996        if (vsi->num_vlan || vf->port_vlan_info) {
2997                struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2998                struct net_device *pf_netdev;
2999
3000                if (!pf_vsi) {
3001                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3002                        goto error_param;
3003                }
3004
3005                pf_netdev = pf_vsi->netdev;
3006
3007                ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
3008                if (ret) {
3009                        dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
3010                                rm_promisc ? "ON" : "OFF", vf->vf_id,
3011                                vsi->vsi_num);
3012                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3013                }
3014
3015                ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
3016                if (ret) {
3017                        dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
3018                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3019                        goto error_param;
3020                }
3021        }
3022
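        /* Two implementations follow. Without ICE_FLAG_VF_TRUE_PROMISC_ENA,
         * promiscuous mode is approximated by making this VF's VSI the
         * switch's default forwarding VSI, so it receives traffic that no
         * other filter claims; with the flag set, real Tx/Rx promiscuous
         * filter rules are programmed instead.
         */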
3023        if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
3024                bool set_dflt_vsi = alluni || allmulti;
3025
3026                if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
3027                        /* only attempt to set the default forwarding VSI if
3028                         * it's not currently set
3029                         */
3030                        ret = ice_set_dflt_vsi(pf->first_sw, vsi);
3031                else if (!set_dflt_vsi &&
3032                         ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
3033                        /* only attempt to free the default forwarding VSI if we
3034                         * are the owner
3035                         */
3036                        ret = ice_clear_dflt_vsi(pf->first_sw);
3037
3038                if (ret) {
3039                        dev_err(dev, "Failed to %sable VF %d as the default VSI, error %d\n",
3040                                set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
3041                        v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3042                        goto error_param;
3043                }
3044        } else {
3045                enum ice_status status;
3046                u8 promisc_m;
3047
3048                if (alluni) {
3049                        if (vf->port_vlan_info || vsi->num_vlan)
3050                                promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
3051                        else
3052                                promisc_m = ICE_UCAST_PROMISC_BITS;
3053                } else if (allmulti) {
3054                        if (vf->port_vlan_info || vsi->num_vlan)
3055                                promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
3056                        else
3057                                promisc_m = ICE_MCAST_PROMISC_BITS;
3058                } else {
3059                        if (vf->port_vlan_info || vsi->num_vlan)
3060                                promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
3061                        else
3062                                promisc_m = ICE_UCAST_PROMISC_BITS;
3063                }
3064
3065                /* Configure multicast/unicast with or without VLAN promiscuous
3066                 * mode
3067                 */
3068                status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
3069                if (status) {
3070                        dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
3071                                rm_promisc ? "dis" : "en", vf->vf_id,
3072                                ice_stat_str(status));
3073                        v_ret = ice_err_to_virt_err(status);
3074                        goto error_param;
3075                } else {
3076                        dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
3077                                rm_promisc ? "dis" : "en", vf->vf_id);
3078                }
3079        }
3080
3081        if (allmulti &&
3082            !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3083                dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", vf->vf_id);
3084        else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3085                dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", vf->vf_id);
3086
3087        if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
3088                dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id);
3089        else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
3090                dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", vf->vf_id);
3091
3092error_param:
3093        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3094                                     v_ret, NULL, 0);
3095}
3096
3097/**
3098 * ice_vc_get_stats_msg
3099 * @vf: pointer to the VF info
3100 * @msg: pointer to the msg buffer
3101 *
3102 * called from the VF to get VSI stats
3103 */
3104static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
3105{
3106        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3107        struct virtchnl_queue_select *vqs =
3108                (struct virtchnl_queue_select *)msg;
3109        struct ice_eth_stats stats = { 0 };
3110        struct ice_vsi *vsi;
3111
3112        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3113                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3114                goto error_param;
3115        }
3116
3117        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3118                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3119                goto error_param;
3120        }
3121
3122        vsi = ice_get_vf_vsi(vf);
3123        if (!vsi) {
3124                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3125                goto error_param;
3126        }
3127
3128        ice_update_eth_stats(vsi);
3129
3130        stats = vsi->eth_stats;
3131
3132error_param:
3133        /* send the response to the VF */
3134        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
3135                                     (u8 *)&stats, sizeof(stats));
3136}
3137
3138/**
3139 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
3140 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
3141 *
3142 * Return true on successful validation, else false
3143 */
3144static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
3145{
3146        if ((!vqs->rx_queues && !vqs->tx_queues) ||
3147            vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
3148            vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
3149                return false;
3150
3151        return true;
3152}
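/* For example, a VF is limited to ICE_MAX_RSS_QS_PER_VF queue pairs, so only
 * bits [0, ICE_MAX_RSS_QS_PER_VF) may be set in either bitmap; a request with
 * both bitmaps empty, or with any higher bit set, is rejected as malformed.
 */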
3153
3154/**
3155 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
3156 * @vsi: VSI of the VF to configure
3157 * @q_idx: VF queue index used to determine the queue in the PF's space
3158 */
3159static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3160{
3161        struct ice_hw *hw = &vsi->back->hw;
3162        u32 pfq = vsi->txq_map[q_idx];
3163        u32 reg;
3164
3165        reg = rd32(hw, QINT_TQCTL(pfq));
3166
3167        /* MSI-X index 0 in the VF's space is always for the OICR, which means
3168         * this is most likely a poll mode VF driver, so don't enable an
3169         * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
3170         */
3171        if (!(reg & QINT_TQCTL_MSIX_INDX_M))
3172                return;
3173
3174        wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
3175}
3176
3177/**
3178 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
3179 * @vsi: VSI of the VF to configure
3180 * @q_idx: VF queue index used to determine the queue in the PF's space
3181 */
3182static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3183{
3184        struct ice_hw *hw = &vsi->back->hw;
3185        u32 pfq = vsi->rxq_map[q_idx];
3186        u32 reg;
3187
3188        reg = rd32(hw, QINT_RQCTL(pfq));
3189
3190        /* MSI-X index 0 in the VF's space is always for the OICR, which means
3191         * this is most likely a poll mode VF driver, so don't enable an
3192         * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
3193         */
3194        if (!(reg & QINT_RQCTL_MSIX_INDX_M))
3195                return;
3196
3197        wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
3198}
3199
3200/**
3201 * ice_vc_ena_qs_msg
3202 * @vf: pointer to the VF info
3203 * @msg: pointer to the msg buffer
3204 *
3205 * called from the VF to enable all or specific queue(s)
3206 */
3207static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
3208{
3209        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3210        struct virtchnl_queue_select *vqs =
3211            (struct virtchnl_queue_select *)msg;
3212        struct ice_vsi *vsi;
3213        unsigned long q_map;
3214        u16 vf_q_id;
3215
3216        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3217                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3218                goto error_param;
3219        }
3220
3221        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3222                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3223                goto error_param;
3224        }
3225
3226        if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3227                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3228                goto error_param;
3229        }
3230
3231        vsi = ice_get_vf_vsi(vf);
3232        if (!vsi) {
3233                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3234                goto error_param;
3235        }
3236
3237        /* Enable only Rx rings, Tx rings were enabled by the FW when the
3238         * Tx queue group list was configured and the context bits were
3239         * programmed using ice_vsi_cfg_txqs
3240         */
3241        q_map = vqs->rx_queues;
3242        for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3243                if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3244                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3245                        goto error_param;
3246                }
3247
3248                /* Skip queue if enabled */
3249                if (test_bit(vf_q_id, vf->rxq_ena))
3250                        continue;
3251
3252                if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
3253                        dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
3254                                vf_q_id, vsi->vsi_num);
3255                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3256                        goto error_param;
3257                }
3258
3259                ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
3260                set_bit(vf_q_id, vf->rxq_ena);
3261        }
3262
3263        q_map = vqs->tx_queues;
3264        for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3265                if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3266                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3267                        goto error_param;
3268                }
3269
3270                /* Skip queue if enabled */
3271                if (test_bit(vf_q_id, vf->txq_ena))
3272                        continue;
3273
3274                ice_vf_ena_txq_interrupt(vsi, vf_q_id);
3275                set_bit(vf_q_id, vf->txq_ena);
3276        }
3277
3278        /* Set flag to indicate that queues are enabled */
3279        if (v_ret == VIRTCHNL_STATUS_SUCCESS)
3280                set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
3281
3282error_param:
3283        /* send the response to the VF */
3284        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
3285                                     NULL, 0);
3286}
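/* In the usual virtchnl sequence a VF driver (e.g. iavf) sends
 * VIRTCHNL_OP_CONFIG_VSI_QUEUES and VIRTCHNL_OP_CONFIG_IRQ_MAP before
 * VIRTCHNL_OP_ENABLE_QUEUES, so by the time the handler above runs the Tx
 * queue contexts are already programmed and only the Rx rings plus the queue
 * interrupt cause-enable bits remain to be touched. That ordering is the
 * common case, not something the handler itself enforces.
 */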
3287
3288/**
3289 * ice_vc_dis_qs_msg
3290 * @vf: pointer to the VF info
3291 * @msg: pointer to the msg buffer
3292 *
3293 * called from the VF to disable all or specific
3294 * queue(s)
3295 */
3296static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
3297{
3298        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3299        struct virtchnl_queue_select *vqs =
3300            (struct virtchnl_queue_select *)msg;
3301        struct ice_vsi *vsi;
3302        unsigned long q_map;
3303        u16 vf_q_id;
3304
3305        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
3306            !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
3307                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3308                goto error_param;
3309        }
3310
3311        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3312                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3313                goto error_param;
3314        }
3315
3316        if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3317                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3318                goto error_param;
3319        }
3320
3321        vsi = ice_get_vf_vsi(vf);
3322        if (!vsi) {
3323                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3324                goto error_param;
3325        }
3326
3327        if (vqs->tx_queues) {
3328                q_map = vqs->tx_queues;
3329
3330                for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3331                        struct ice_ring *ring = vsi->tx_rings[vf_q_id];
3332                        struct ice_txq_meta txq_meta = { 0 };
3333
3334                        if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3335                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3336                                goto error_param;
3337                        }
3338
3339                        /* Skip queue if not enabled */
3340                        if (!test_bit(vf_q_id, vf->txq_ena))
3341                                continue;
3342
3343                        ice_fill_txq_meta(vsi, ring, &txq_meta);
3344
3345                        if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
3346                                                 ring, &txq_meta)) {
3347                                dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
3348                                        vf_q_id, vsi->vsi_num);
3349                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3350                                goto error_param;
3351                        }
3352
3353                        /* Clear enabled queues flag */
3354                        clear_bit(vf_q_id, vf->txq_ena);
3355                }
3356        }
3357
3358        q_map = vqs->rx_queues;
3359        /* speed up Rx queue disable by batching them if possible */
3360        if (q_map &&
3361            bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
3362                if (ice_vsi_stop_all_rx_rings(vsi)) {
3363                        dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
3364                                vsi->vsi_num);
3365                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3366                        goto error_param;
3367                }
3368
3369                bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
3370        } else if (q_map) {
3371                for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3372                        if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3373                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3374                                goto error_param;
3375                        }
3376
3377                        /* Skip queue if not enabled */
3378                        if (!test_bit(vf_q_id, vf->rxq_ena))
3379                                continue;
3380
3381                        if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
3382                                                     true)) {
3383                                dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
3384                                        vf_q_id, vsi->vsi_num);
3385                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3386                                goto error_param;
3387                        }
3388
3389                        /* Clear enabled queues flag */
3390                        clear_bit(vf_q_id, vf->rxq_ena);
3391                }
3392        }
3393
3394        /* Clear enabled queues flag */
3395        if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
3396                clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
3397
3398error_param:
3399        /* send the response to the VF */
3400        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
3401                                     NULL, 0);
3402}
3403
3404/**
3405 * ice_cfg_interrupt
3406 * @vf: pointer to the VF info
3407 * @vsi: the VSI being configured
3408 * @vector_id: vector ID
3409 * @map: vector map for mapping vectors to queues
3410 * @q_vector: structure for interrupt vector
3411 *
 * Configure the IRQ to queue map.
3412 */
3413static int
3414ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
3415                  struct virtchnl_vector_map *map,
3416                  struct ice_q_vector *q_vector)
3417{
3418        u16 vsi_q_id, vsi_q_id_idx;
3419        unsigned long qmap;
3420
3421        q_vector->num_ring_rx = 0;
3422        q_vector->num_ring_tx = 0;
3423
3424        qmap = map->rxq_map;
3425        for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3426                vsi_q_id = vsi_q_id_idx;
3427
3428                if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3429                        return VIRTCHNL_STATUS_ERR_PARAM;
3430
3431                q_vector->num_ring_rx++;
3432                q_vector->rx.itr_idx = map->rxitr_idx;
3433                vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
3434                ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
3435                                      q_vector->rx.itr_idx);
3436        }
3437
3438        qmap = map->txq_map;
3439        for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3440                vsi_q_id = vsi_q_id_idx;
3441
3442                if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3443                        return VIRTCHNL_STATUS_ERR_PARAM;
3444
3445                q_vector->num_ring_tx++;
3446                q_vector->tx.itr_idx = map->txitr_idx;
3447                vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
3448                ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
3449                                      q_vector->tx.itr_idx);
3450        }
3451
3452        return VIRTCHNL_STATUS_SUCCESS;
3453}
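/* Note that ice_cfg_interrupt() hands back virtchnl status codes as a plain
 * int; ice_vc_cfg_irq_map_msg() below casts the value back to
 * enum virtchnl_status_code before reporting it to the VF.
 */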
3454
3455/**
3456 * ice_vc_cfg_irq_map_msg
3457 * @vf: pointer to the VF info
3458 * @msg: pointer to the msg buffer
3459 *
3460 * called from the VF to configure the IRQ to queue map
3461 */
3462static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
3463{
3464        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3465        u16 num_q_vectors_mapped, vsi_id, vector_id;
3466        struct virtchnl_irq_map_info *irqmap_info;
3467        struct virtchnl_vector_map *map;
3468        struct ice_pf *pf = vf->pf;
3469        struct ice_vsi *vsi;
3470        int i;
3471
3472        irqmap_info = (struct virtchnl_irq_map_info *)msg;
3473        num_q_vectors_mapped = irqmap_info->num_vectors;
3474
3475        /* Check to make sure number of VF vectors mapped is not greater than
3476         * number of VF vectors originally allocated, and check that
3477         * there is actually at least a single VF queue vector mapped
3478         */
3479        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3480            pf->num_msix_per_vf < num_q_vectors_mapped ||
3481            !num_q_vectors_mapped) {
3482                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3483                goto error_param;
3484        }
3485
3486        vsi = ice_get_vf_vsi(vf);
3487        if (!vsi) {
3488                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3489                goto error_param;
3490        }
3491
3492        for (i = 0; i < num_q_vectors_mapped; i++) {
3493                struct ice_q_vector *q_vector;
3494
3495                map = &irqmap_info->vecmap[i];
3496
3497                vector_id = map->vector_id;
3498                vsi_id = map->vsi_id;
3499                /* vector_id is always 0-based for each VF, and can never be
3500                 * larger than or equal to the max allowed interrupts per VF
3501                 */
3502                if (!(vector_id < pf->num_msix_per_vf) ||
3503                    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
3504                    (!vector_id && (map->rxq_map || map->txq_map))) {
3505                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3506                        goto error_param;
3507                }
3508
3509                /* No need to map VF miscellaneous or rogue vector */
3510                if (!vector_id)
3511                        continue;
3512
3513                /* Subtract the non-queue vector count from the vector_id
3514                 * passed by the VF to get the VSI queue vector array index
3515                 */
3516                q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
3517                if (!q_vector) {
3518                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3519                        goto error_param;
3520                }
3521
3522                /* look out for an invalid queue index */
3523                v_ret = (enum virtchnl_status_code)
3524                        ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
3525                if (v_ret)
3526                        goto error_param;
3527        }
3528
3529error_param:
3530        /* send the response to the VF */
3531        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
3532                                     NULL, 0);
3533}
3534
3535/**
3536 * ice_vc_cfg_qs_msg
3537 * @vf: pointer to the VF info
3538 * @msg: pointer to the msg buffer
3539 *
3540 * called from the VF to configure the Rx/Tx queues
3541 */
3542static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
3543{
3544        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3545        struct virtchnl_vsi_queue_config_info *qci =
3546            (struct virtchnl_vsi_queue_config_info *)msg;
3547        struct virtchnl_queue_pair_info *qpi;
3548        u16 num_rxq = 0, num_txq = 0;
3549        struct ice_pf *pf = vf->pf;
3550        struct ice_vsi *vsi;
3551        int i;
3552
3553        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3554                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3555                goto error_param;
3556        }
3557
3558        if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
3559                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3560                goto error_param;
3561        }
3562
3563        vsi = ice_get_vf_vsi(vf);
3564        if (!vsi) {
3565                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3566                goto error_param;
3567        }
3568
3569        if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
3570            qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
3571                dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
3572                        vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
3573                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3574                goto error_param;
3575        }
3576
3577        for (i = 0; i < qci->num_queue_pairs; i++) {
3578                qpi = &qci->qpair[i];
3579                if (qpi->txq.vsi_id != qci->vsi_id ||
3580                    qpi->rxq.vsi_id != qci->vsi_id ||
3581                    qpi->rxq.queue_id != qpi->txq.queue_id ||
3582                    qpi->txq.headwb_enabled ||
3583                    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
3584                    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
3585                    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
3586                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3587                        goto error_param;
3588                }
3589                /* copy Tx queue info from VF into VSI */
3590                if (qpi->txq.ring_len > 0) {
3591                        num_txq++;
3592                        vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
3593                        vsi->tx_rings[i]->count = qpi->txq.ring_len;
3594                }
3595
3596                /* copy Rx queue info from VF into VSI */
3597                if (qpi->rxq.ring_len > 0) {
3598                        u16 max_frame_size = ice_vc_get_max_frame_size(vf);
3599
3600                        num_rxq++;
3601                        vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
3602                        vsi->rx_rings[i]->count = qpi->rxq.ring_len;
3603
3604                        if (qpi->rxq.databuffer_size != 0 &&
3605                            (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
3606                             qpi->rxq.databuffer_size < 1024)) {
3607                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3608                                goto error_param;
3609                        }
3610                        vsi->rx_buf_len = qpi->rxq.databuffer_size;
3611                        vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
3612                        if (qpi->rxq.max_pkt_size > max_frame_size ||
3613                            qpi->rxq.max_pkt_size < 64) {
3614                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3615                                goto error_param;
3616                        }
3617                }
3618
3619                vsi->max_frame = qpi->rxq.max_pkt_size;
3620                /* add space for the port VLAN since the VF driver is not
3621                 * expected to account for it in the MTU calculation
3622                 */
3623                if (vf->port_vlan_info)
3624                        vsi->max_frame += VLAN_HLEN;
3625        }
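        /* Worked example: a VF behind a port VLAN that requests a
         * max_pkt_size of 1518 ends up with vsi->max_frame of 1522, since the
         * 4-byte port VLAN tag is added outside the VF and is not part of the
         * VF's own MTU calculation.
         */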
3626
3627        /* The VF may configure fewer queues than it was allocated (or than
3628         * the default allocation), so update the VSI with the number in use.
3629         */
3630        vsi->num_txq = num_txq;
3631        vsi->num_rxq = num_rxq;
3632        /* All queues of VF VSI are in TC 0 */
3633        vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
3634        vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
3635
3636        if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
3637                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3638
3639error_param:
3640        /* send the response to the VF */
3641        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
3642                                     NULL, 0);
3643}
3644
3645/**
3646 * ice_is_vf_trusted
3647 * @vf: pointer to the VF info
3648 */
3649static bool ice_is_vf_trusted(struct ice_vf *vf)
3650{
3651        return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3652}
3653
3654/**
3655 * ice_can_vf_change_mac
3656 * @vf: pointer to the VF info
3657 *
3658 * Return true if the VF is allowed to change its MAC filters, false otherwise
3659 */
3660static bool ice_can_vf_change_mac(struct ice_vf *vf)
3661{
3662        /* If the VF MAC address has been set administratively (via the
3663         * ndo_set_vf_mac command), then deny permission to the VF to
3664         * add/delete unicast MAC addresses, unless the VF is trusted
3665         */
3666        if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3667                return false;
3668
3669        return true;
3670}
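/* vf->pf_set_mac is set when the host administrator pins a MAC address via
 * ndo_set_vf_mac, e.g. "ip link set <pf> vf <id> mac <addr>" (shown only as
 * an illustration); after that an untrusted VF keeps the pinned address but
 * may no longer add or delete unicast filters of its own.
 */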
3671
3672/**
3673 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3674 * @vf: pointer to the VF info
3675 * @vsi: pointer to the VF's VSI
3676 * @mac_addr: MAC address to add
3677 */
3678static int
3679ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3680{
3681        struct device *dev = ice_pf_to_dev(vf->pf);
3682        enum ice_status status;
3683
3684        /* default unicast MAC already added */
3685        if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3686                return 0;
3687
3688        if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3689                dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3690                return -EPERM;
3691        }
3692
3693        status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3694        if (status == ICE_ERR_ALREADY_EXISTS) {
3695                dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3696                        vf->vf_id);
3697                return -EEXIST;
3698        } else if (status) {
3699                dev_err(dev, "Failed to add MAC %pM for VF %d, error %s\n",
3700                        mac_addr, vf->vf_id, ice_stat_str(status));
3701                return -EIO;
3702        }
3703
3704        /* Set the default LAN address to the latest unicast MAC address added
3705         * by the VF. The default LAN address is reported by the PF via
3706         * ndo_get_vf_config.
3707         */
3708        if (is_unicast_ether_addr(mac_addr))
3709                ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3710
3711        vf->num_mac++;
3712
3713        return 0;
3714}
3715
3716/**
3717 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3718 * @vf: pointer to the VF info
3719 * @vsi: pointer to the VF's VSI
3720 * @mac_addr: MAC address to delete
3721 */
3722static int
3723ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3724{
3725        struct device *dev = ice_pf_to_dev(vf->pf);
3726        enum ice_status status;
3727
3728        if (!ice_can_vf_change_mac(vf) &&
3729            ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3730                return 0;
3731
3732        status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3733        if (status == ICE_ERR_DOES_NOT_EXIST) {
3734                dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3735                        vf->vf_id);
3736                return -ENOENT;
3737        } else if (status) {
3738                dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3739                        mac_addr, vf->vf_id, ice_stat_str(status));
3740                return -EIO;
3741        }
3742
3743        if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3744                eth_zero_addr(vf->dflt_lan_addr.addr);
3745
3746        vf->num_mac--;
3747
3748        return 0;
3749}
3750
3751/**
3752 * ice_vc_handle_mac_addr_msg
3753 * @vf: pointer to the VF info
3754 * @msg: pointer to the msg buffer
3755 * @set: true if MAC filters are being set, false otherwise
3756 *
3757 * add guest MAC address filter
3758 */
3759static int
3760ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3761{
3762        int (*ice_vc_cfg_mac)
3763                (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
3764        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3765        struct virtchnl_ether_addr_list *al =
3766            (struct virtchnl_ether_addr_list *)msg;
3767        struct ice_pf *pf = vf->pf;
3768        enum virtchnl_ops vc_op;
3769        struct ice_vsi *vsi;
3770        int i;
3771
3772        if (set) {
3773                vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3774                ice_vc_cfg_mac = ice_vc_add_mac_addr;
3775        } else {
3776                vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3777                ice_vc_cfg_mac = ice_vc_del_mac_addr;
3778        }
3779
3780        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3781            !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3782                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3783                goto handle_mac_exit;
3784        }
3785
3786        /* If this VF is not privileged, then we can't add more than a
3787         * limited number of addresses. Check to make sure that the
3788         * additions do not push us over the limit.
3789         */
3790        if (set && !ice_is_vf_trusted(vf) &&
3791            (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3792                dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses: VF-%d is not trusted, switch the VF to trusted mode to raise the limit\n",
3793                        vf->vf_id);
3794                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3795                goto handle_mac_exit;
3796        }
3797
3798        vsi = ice_get_vf_vsi(vf);
3799        if (!vsi) {
3800                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3801                goto handle_mac_exit;
3802        }
3803
3804        for (i = 0; i < al->num_elements; i++) {
3805                u8 *mac_addr = al->list[i].addr;
3806                int result;
3807
3808                if (is_broadcast_ether_addr(mac_addr) ||
3809                    is_zero_ether_addr(mac_addr))
3810                        continue;
3811
3812                result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3813                if (result == -EEXIST || result == -ENOENT) {
3814                        continue;
3815                } else if (result) {
3816                        v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3817                        goto handle_mac_exit;
3818                }
3819        }
3820
3821handle_mac_exit:
3822        /* send the response to the VF */
3823        return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3824}
3825
3826/**
3827 * ice_vc_add_mac_addr_msg
3828 * @vf: pointer to the VF info
3829 * @msg: pointer to the msg buffer
3830 *
3831 * add guest MAC address filter
3832 */
3833static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3834{
3835        return ice_vc_handle_mac_addr_msg(vf, msg, true);
3836}
3837
3838/**
3839 * ice_vc_del_mac_addr_msg
3840 * @vf: pointer to the VF info
3841 * @msg: pointer to the msg buffer
3842 *
3843 * remove guest MAC address filter
3844 */
3845static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3846{
3847        return ice_vc_handle_mac_addr_msg(vf, msg, false);
3848}
3849
3850/**
3851 * ice_vc_request_qs_msg
3852 * @vf: pointer to the VF info
3853 * @msg: pointer to the msg buffer
3854 *
3855 * VFs get a default number of queues but can use this message to request a
3856 * different number. If the request is successful, PF will reset the VF and
3857 * return 0. If unsuccessful, the PF will inform the VF of the number of
3858 * available queue pairs in the virtchnl message response.
3859 */
3860static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3861{
3862        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3863        struct virtchnl_vf_res_request *vfres =
3864                (struct virtchnl_vf_res_request *)msg;
3865        u16 req_queues = vfres->num_queue_pairs;
3866        struct ice_pf *pf = vf->pf;
3867        u16 max_allowed_vf_queues;
3868        u16 tx_rx_queue_left;
3869        struct device *dev;
3870        u16 cur_queues;
3871
3872        dev = ice_pf_to_dev(pf);
3873        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3874                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3875                goto error_param;
3876        }
3877
3878        cur_queues = vf->num_vf_qs;
3879        tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3880                                 ice_get_avail_rxq_count(pf));
3881        max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
3882        if (!req_queues) {
3883                dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
3884                        vf->vf_id);
3885        } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
3886                dev_err(dev, "VF %d tried to request more than %d queues.\n",
3887                        vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3888                vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
3889        } else if (req_queues > cur_queues &&
3890                   req_queues - cur_queues > tx_rx_queue_left) {
3891                dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
3892                         vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
3893                vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
3894                                               ICE_MAX_RSS_QS_PER_VF);
3895        } else {
3896                /* request is successful, then reset VF */
3897                vf->num_req_qs = req_queues;
3898                ice_vc_reset_vf(vf);
3899                dev_info(dev, "VF %