linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID          (1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT       2
#define MLX4_VF_COUNTERS_PER_PORT       1

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
        const char              *func_name;
};
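
/*
 * Every tracked object embeds a res_common: 'node' links it into the
 * per-type red-black tree (keyed by res_id), 'list' links it into the
 * owning slave's per-type list, and state/from_state implement the
 * simple claim/release protocol used by _get_res()/put_res() below.
 */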

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
        /* VF DMFS mbox with port flipped */
        void                    *mirr_mbox;
        /* > 0 --> apply mirror when getting into HA mode      */
        /* = 0 --> un-apply mirror when getting out of HA mode */
        u32                     mirr_mbox_size;
        struct list_head        mirr_list;
        u64                     mirr_rule_id;
};

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = rb_entry(node, struct res_common,
                                                  node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = rb_entry(*new, struct res_common,
                                                   node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}
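
/*
 * res_tracker_lookup()/res_tracker_insert() are the standard kernel
 * rbtree search/insert idiom keyed by res_id, and both rely on the
 * caller holding mlx4_tlock(dev).  Illustrative pairing (see
 * add_res_range() below for the real thing):
 *
 *      spin_lock_irq(mlx4_tlock(dev));
 *      if (!find_res(dev, id, type))
 *              err = res_tracker_insert(root, res);
 *      spin_unlock_irq(mlx4_tlock(dev));
 */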

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug use */
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EDQUOT;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}
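
/*
 * Worked example of the grant accounting above (illustrative numbers):
 * with guaranteed = 4, allocated = 2 and count = 5, two units come out
 * of the slave's guaranteed share (from_rsvd = 2) and from_free = 3
 * must fit in the shared pool, so the grant succeeds only if
 * free - 3 >= reserved.  mlx4_release_resource() below performs the
 * mirror-image split when the resources are returned.
 */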

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                    enum mlx4_resource res_type, int count,
                                    int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
        return;
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}
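
/*
 * Quota math, with illustrative numbers: for num_vfs = 7 (8 functions)
 * and num_instances = 64K QPs, each function is guaranteed
 * 64K / (2 * 8) = 4K and capped at 64K / 2 + 4K = 36K, so guarantees
 * sum to half the pool and no single function can monopolize it.
 */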

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

static int
mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
                                 struct resource_allocator *res_alloc,
                                 int vf)
{
        struct mlx4_active_ports actv_ports;
        int ports, counters_guaranteed;

        /* For master, only allocate according to the number of phys ports */
        if (vf == mlx4_master_func_num(dev))
                return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;

        /* calculate real number of ports for the VF */
        actv_ports = mlx4_get_active_ports(dev, vf);
        ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
        counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;

        /* If we do not have enough counters for this VF, do not
         * allocate any for it. '-1' to reduce the sink counter.
         */
        if ((res_alloc->res_reserved + counters_guaranteed) >
            (dev->caps.max_counters - 1))
                return 0;

        return counters_guaranteed;
}
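
/*
 * Example with illustrative numbers: a PF on a dual-port HCA is
 * guaranteed 2 * MLX4_PF_COUNTERS_PER_PORT = 4 counters and a
 * single-port VF MLX4_VF_COUNTERS_PER_PORT = 1.  One index is always
 * held back for the sink counter ('-1' above), so guarantees are
 * granted only while res_reserved stays below max_counters - 1.
 */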

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kcalloc(dev->num_slaves, sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0 ; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
                                                 sizeof(int),
                                                 GFP_KERNEL);
                res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
                                                      sizeof(int),
                                                      GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated =
                                kcalloc(MLX4_MAX_PORTS *
                                                (dev->persist->num_vfs + 1),
                                        sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated =
                                kcalloc(dev->persist->num_vfs + 1,
                                        sizeof(int), GFP_KERNEL);
                /* Reduce the sink counter */
                if (i == RES_COUNTER)
                        res_alloc->res_free = dev->caps.max_counters - 1;

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port for
                                         * both ports.
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] =
                                        mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}
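
/*
 * In both paravirtualization helpers above, bit 6 of sched_queue
 * selects the physical port: port = (sched_queue >> 6 & 1) + 1, so
 * e.g. sched_queue = 0x40 means port 2.  The mgid_index rewrite keeps
 * a slave inside its own window of the shared GID table, starting at
 * mlx4_get_base_gid_ix(dev, slave, port).
 */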

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
                          u8 slave, int port);

static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port, err = 0;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        err = handle_counter(dev, qpc, slave, port);
        if (err)
                goto out;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force vlan stripping by clearing vsd; MLX QP refers to Raw Ethernet */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                                if (err)
                                        goto out;
                        }
                }

                /* preserve IF_COUNTER flag */
                qpc->pri_path.vlan_control &=
                        MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
                                /* vst QinQ should block untagged on TX,
                                 * but cvlan is in payload and phv is set so
                                 * hw sees it as untagged. Block tagged instead.
                                 */
                                qpc->pri_path.vlan_control |=
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                        } else { /* vst 802.1Q */
                                qpc->pri_path.vlan_control |=
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                        }
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
                if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
                        qpc->pri_path.fl |= MLX4_FL_SV;
                else
                        qpc->pri_path.fl |= MLX4_FL_CV;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
                qpc->qos_vport = vp_oper->state.qos_vport;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
out:
        return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
{
        switch (t) {
        case RES_QP:
                return "QP";
        case RES_CQ:
                return "CQ";
        case RES_SRQ:
                return "SRQ";
        case RES_XRCD:
                return "XRCD";
        case RES_MPT:
                return "MPT";
        case RES_MTT:
                return "MTT";
        case RES_MAC:
                return "MAC";
        case RES_VLAN:
                return "VLAN";
        case RES_COUNTER:
                return "COUNTER";
        case RES_FS_RULE:
                return "FS_RULE";
        case RES_EQ:
                return "EQ";
        default:
                return "INVALID RESOURCE";
        }
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type,
                    void *res, const char *func_name)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENONET;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                mlx4_warn(dev,
                          "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
                          func_name, slave, res_id, mlx4_resource_type_to_str(type),
                          r->func_name);
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;
        r->func_name = func_name;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

#define get_res(dev, slave, res_id, type, res) \
        _get_res((dev), (slave), (res_id), (type), (res), __func__)

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r) {
                r->state = r->from_state;
                r->func_name = "";
        }
        spin_unlock_irq(mlx4_tlock(dev));
}
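
/*
 * get_res()/put_res() bracket any command-path access to a tracked
 * object: _get_res() verifies ownership, parks the current state in
 * from_state and marks the entry RES_ANY_BUSY (recording the caller's
 * __func__ for the "already taken by" warning), and put_res() restores
 * from_state.  A typical caller looks like (illustrative sketch):
 *
 *      err = get_res(dev, slave, cqn, RES_CQ, &cq);
 *      if (err)
 *              return err;
 *      ...operate on cq while it is marked busy...
 *      put_res(dev, slave, cqn, RES_CQ);
 */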

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
                                   int counter_index)
{
        struct res_common *r;
        struct res_counter *counter;
        int ret = 0;

        if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
                return ret;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, counter_index, RES_COUNTER);
        if (!r || r->owner != slave) {
                ret = -EINVAL;
        } else {
                counter = container_of(r, struct res_counter, com);
                if (!counter->port)
                        counter->port = port;
        }

        spin_unlock_irq(mlx4_tlock(dev));
        return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
                                     struct mlx4_qp_context *qpc, u8 slave,
                                     int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *tmp;
        struct res_counter *counter;
        u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(tmp,
                            &tracker->slave_list[slave].res_list[RES_COUNTER],
                            list) {
                counter = container_of(tmp, struct res_counter, com);
                if (port == counter->port) {
                        qpc->pri_path.counter_index = counter->com.res_id;
                        spin_unlock_irq(mlx4_tlock(dev));
                        return 0;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));

        /* No existing counter, need to allocate a new counter */
        err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
                                port);
        if (err == -ENOENT) {
                err = 0;
        } else if (err && err != -ENOSPC) {
                mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
                         __func__, slave, err);
        } else {
                qpc->pri_path.counter_index = counter_idx;
                mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
                         __func__, slave, qpc->pri_path.counter_index);
                err = 0;
        }

        return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
                          u8 slave, int port)
{
        if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
                return handle_existing_counter(dev, slave, port,
                                               qpc->pri_path.counter_index);

        return handle_unexisting_counter(dev, qpc, slave, port);
}
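
/*
 * Counter policy for a QP, as implemented above: a QPC that already
 * names a real counter only gets its port recorded; otherwise a
 * counter the slave already owns on that port is reused, and only then
 * is a new one reserved.  If reservation fails with -ENOSPC the QPC
 * keeps counter_idx's initial value, i.e. the sink counter, and
 * -ENOENT is ignored outright.
 */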

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;
        ret->port = port;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id, extra);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}
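
/*
 * alloc_tr() is the constructor dispatch for the tracker.  Note that
 * 'extra' is overloaded per type: the MPT key, the MTT order, the
 * counter's port or the flow-steering rule's QPN.  The returned entry
 * is stamped with the owning slave before add_res_range() links it
 * into the rbtree and the slave's list.
 */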

int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
                          struct mlx4_counter *data)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *tmp;
        struct res_counter *counter;
        int *counters_arr;
        int i = 0, err = 0;

        memset(data, 0, sizeof(*data));

        counters_arr = kmalloc_array(dev->caps.max_counters,
                                     sizeof(*counters_arr), GFP_KERNEL);
        if (!counters_arr)
                return -ENOMEM;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(tmp,
                            &tracker->slave_list[slave].res_list[RES_COUNTER],
                            list) {
                counter = container_of(tmp, struct res_counter, com);
                if (counter->port == port) {
                        counters_arr[i] = (int)tmp->res_id;
                        i++;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));
        counters_arr[i] = -1;

        i = 0;

        while (counters_arr[i] != -1) {
                err = mlx4_get_counter_stats(dev, counters_arr[i], data,
                                             0);
                if (err) {
                        memset(data, 0, sizeof(*data));
                        goto table_changed;
                }
                i++;
        }

table_changed:
        kfree(counters_arr);
        return 0;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del_init(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}
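
/*
 * add_res_range() is all-or-nothing: the entries are preallocated
 * outside the lock, then inserted under mlx4_tlock(dev); a duplicate
 * id or insert failure unwinds everything already linked (the 'undo'
 * path), so the tree and the slave's list never see a partial range.
 */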

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}
1386
1387static int remove_counter_ok(struct res_counter *res)
1388{
1389        if (res->com.state == RES_COUNTER_BUSY)
1390                return -EBUSY;
1391        else if (res->com.state != RES_COUNTER_ALLOCATED)
1392                return -EPERM;
1393
1394        return 0;
1395}
1396
1397static int remove_xrcdn_ok(struct res_xrcdn *res)
1398{
1399        if (res->com.state == RES_XRCD_BUSY)
1400                return -EBUSY;
1401        else if (res->com.state != RES_XRCD_ALLOCATED)
1402                return -EPERM;
1403
1404        return 0;
1405}
1406
1407static int remove_fs_rule_ok(struct res_fs_rule *res)
1408{
1409        if (res->com.state == RES_FS_RULE_BUSY)
1410                return -EBUSY;
1411        else if (res->com.state != RES_FS_RULE_ALLOCATED)
1412                return -EPERM;
1413
1414        return 0;
1415}
1416
1417static int remove_cq_ok(struct res_cq *res)
1418{
1419        if (res->com.state == RES_CQ_BUSY)
1420                return -EBUSY;
1421        else if (res->com.state != RES_CQ_ALLOCATED)
1422                return -EPERM;
1423
1424        return 0;
1425}
1426
1427static int remove_srq_ok(struct res_srq *res)
1428{
1429        if (res->com.state == RES_SRQ_BUSY)
1430                return -EBUSY;
1431        else if (res->com.state != RES_SRQ_ALLOCATED)
1432                return -EPERM;
1433
1434        return 0;
1435}
1436
1437static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1438{
1439        switch (type) {
1440        case RES_QP:
1441                return remove_qp_ok((struct res_qp *)res);
1442        case RES_CQ:
1443                return remove_cq_ok((struct res_cq *)res);
1444        case RES_SRQ:
1445                return remove_srq_ok((struct res_srq *)res);
1446        case RES_MPT:
1447                return remove_mpt_ok((struct res_mpt *)res);
1448        case RES_MTT:
1449                return remove_mtt_ok((struct res_mtt *)res, extra);
1450        case RES_MAC:
1451                return -EOPNOTSUPP;
1452        case RES_EQ:
1453                return remove_eq_ok((struct res_eq *)res);
1454        case RES_COUNTER:
1455                return remove_counter_ok((struct res_counter *)res);
1456        case RES_XRCD:
1457                return remove_xrcdn_ok((struct res_xrcdn *)res);
1458        case RES_FS_RULE:
1459                return remove_fs_rule_ok((struct res_fs_rule *)res);
1460        default:
1461                return -EINVAL;
1462        }
1463}
1464
1465static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1466                         enum mlx4_resource type, int extra)
1467{
1468        u64 i;
1469        int err;
1470        struct mlx4_priv *priv = mlx4_priv(dev);
1471        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1472        struct res_common *r;
1473
1474        spin_lock_irq(mlx4_tlock(dev));
1475        for (i = base; i < base + count; ++i) {
1476                r = res_tracker_lookup(&tracker->res_tree[type], i);
1477                if (!r) {
1478                        err = -ENOENT;
1479                        goto out;
1480                }
1481                if (r->owner != slave) {
1482                        err = -EPERM;
1483                        goto out;
1484                }
1485                err = remove_ok(r, type, extra);
1486                if (err)
1487                        goto out;
1488        }
1489
1490        for (i = base; i < base + count; ++i) {
1491                r = res_tracker_lookup(&tracker->res_tree[type], i);
1492                rb_erase(&r->node, &tracker->res_tree[type]);
1493                list_del(&r->list);
1494                kfree(r);
1495        }
1496        err = 0;
1497
1498out:
1499        spin_unlock_irq(mlx4_tlock(dev));
1500
1501        return err;
1502}
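
/*
 * Illustration (a sketch, not part of the driver): rem_res_range() is
 * deliberately two-phase.  Every id in [base, base + count) is validated
 * under the tracker lock -- it must exist, belong to this slave and pass
 * remove_ok() -- before anything is erased, so a failure part-way through
 * leaves the tracker untouched.  The same all-or-nothing pattern over a
 * plain ownership array (names here are hypothetical):
 *
 *	static int remove_range(int *owner, int n, int base, int count, int slave)
 *	{
 *		int i;
 *
 *		for (i = base; i < base + count; ++i)	// phase 1: validate
 *			if (i >= n || owner[i] != slave)
 *				return -EPERM;		// nothing erased yet
 *		for (i = base; i < base + count; ++i)	// phase 2: commit
 *			owner[i] = -1;
 *		return 0;
 *	}
 */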
1503
1504static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1505                                enum res_qp_states state, struct res_qp **qp,
1506                                int alloc)
1507{
1508        struct mlx4_priv *priv = mlx4_priv(dev);
1509        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1510        struct res_qp *r;
1511        int err = 0;
1512
1513        spin_lock_irq(mlx4_tlock(dev));
1514        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1515        if (!r)
1516                err = -ENOENT;
1517        else if (r->com.owner != slave)
1518                err = -EPERM;
1519        else {
1520                switch (state) {
1521                case RES_QP_BUSY:
1522                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1523                                 __func__, r->com.res_id);
1524                        err = -EBUSY;
1525                        break;
1526
1527                case RES_QP_RESERVED:
1528                        if (r->com.state == RES_QP_MAPPED && !alloc)
1529                                break;
1530
1531                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1532                        err = -EINVAL;
1533                        break;
1534
1535                case RES_QP_MAPPED:
1536                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
1537                            r->com.state == RES_QP_HW)
1538                                break;
1539                        else {
1540                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1541                                          r->com.res_id);
1542                                err = -EINVAL;
1543                        }
1544
1545                        break;
1546
1547                case RES_QP_HW:
1548                        if (r->com.state != RES_QP_MAPPED)
1549                                err = -EINVAL;
1550                        break;
1551                default:
1552                        err = -EINVAL;
1553                }
1554
1555                if (!err) {
1556                        r->com.from_state = r->com.state;
1557                        r->com.to_state = state;
1558                        r->com.state = RES_QP_BUSY;
1559                        if (qp)
1560                                *qp = r;
1561                }
1562        }
1563
1564        spin_unlock_irq(mlx4_tlock(dev));
1565
1566        return err;
1567}
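
/*
 * Illustration (a sketch, not from this file): every *_res_start_move_to()
 * helper follows one protocol -- under the tracker lock the current state
 * is saved in from_state, the target in to_state, and the resource is
 * parked in its BUSY state so concurrent movers back off with -EBUSY.
 * res_end_move() later commits to_state and res_abort_move() restores
 * from_state.  A typical caller, where issue_fw_command() stands in for
 * any firmware command that may fail:
 *
 *	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
 *	if (err)
 *		return err;
 *	err = issue_fw_command(dev, qpn);	// hypothetical
 *	if (err) {
 *		res_abort_move(dev, slave, RES_QP, qpn);
 *		return err;
 *	}
 *	res_end_move(dev, slave, RES_QP, qpn);	// state = to_state
 */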
1568
1569static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1570                                enum res_mpt_states state, struct res_mpt **mpt)
1571{
1572        struct mlx4_priv *priv = mlx4_priv(dev);
1573        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1574        struct res_mpt *r;
1575        int err = 0;
1576
1577        spin_lock_irq(mlx4_tlock(dev));
1578        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1579        if (!r)
1580                err = -ENOENT;
1581        else if (r->com.owner != slave)
1582                err = -EPERM;
1583        else {
1584                switch (state) {
1585                case RES_MPT_BUSY:
1586                        err = -EINVAL;
1587                        break;
1588
1589                case RES_MPT_RESERVED:
1590                        if (r->com.state != RES_MPT_MAPPED)
1591                                err = -EINVAL;
1592                        break;
1593
1594                case RES_MPT_MAPPED:
1595                        if (r->com.state != RES_MPT_RESERVED &&
1596                            r->com.state != RES_MPT_HW)
1597                                err = -EINVAL;
1598                        break;
1599
1600                case RES_MPT_HW:
1601                        if (r->com.state != RES_MPT_MAPPED)
1602                                err = -EINVAL;
1603                        break;
1604                default:
1605                        err = -EINVAL;
1606                }
1607
1608                if (!err) {
1609                        r->com.from_state = r->com.state;
1610                        r->com.to_state = state;
1611                        r->com.state = RES_MPT_BUSY;
1612                        if (mpt)
1613                                *mpt = r;
1614                }
1615        }
1616
1617        spin_unlock_irq(mlx4_tlock(dev));
1618
1619        return err;
1620}
1621
1622static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1623                                enum res_eq_states state, struct res_eq **eq)
1624{
1625        struct mlx4_priv *priv = mlx4_priv(dev);
1626        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1627        struct res_eq *r;
1628        int err = 0;
1629
1630        spin_lock_irq(mlx4_tlock(dev));
1631        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1632        if (!r)
1633                err = -ENOENT;
1634        else if (r->com.owner != slave)
1635                err = -EPERM;
1636        else {
1637                switch (state) {
1638                case RES_EQ_BUSY:
1639                        err = -EINVAL;
1640                        break;
1641
1642                case RES_EQ_RESERVED:
1643                        if (r->com.state != RES_EQ_HW)
1644                                err = -EINVAL;
1645                        break;
1646
1647                case RES_EQ_HW:
1648                        if (r->com.state != RES_EQ_RESERVED)
1649                                err = -EINVAL;
1650                        break;
1651
1652                default:
1653                        err = -EINVAL;
1654                }
1655
1656                if (!err) {
1657                        r->com.from_state = r->com.state;
1658                        r->com.to_state = state;
1659                        r->com.state = RES_EQ_BUSY;
1660                }
1661        }
1662
1663        spin_unlock_irq(mlx4_tlock(dev));
1664
1665        if (!err && eq)
1666                *eq = r;
1667
1668        return err;
1669}
1670
1671static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1672                                enum res_cq_states state, struct res_cq **cq)
1673{
1674        struct mlx4_priv *priv = mlx4_priv(dev);
1675        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1676        struct res_cq *r;
1677        int err;
1678
1679        spin_lock_irq(mlx4_tlock(dev));
1680        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1681        if (!r) {
1682                err = -ENOENT;
1683        } else if (r->com.owner != slave) {
1684                err = -EPERM;
1685        } else if (state == RES_CQ_ALLOCATED) {
1686                if (r->com.state != RES_CQ_HW)
1687                        err = -EINVAL;
1688                else if (atomic_read(&r->ref_count))
1689                        err = -EBUSY;
1690                else
1691                        err = 0;
1692        } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1693                err = -EINVAL;
1694        } else {
1695                err = 0;
1696        }
1697
1698        if (!err) {
1699                r->com.from_state = r->com.state;
1700                r->com.to_state = state;
1701                r->com.state = RES_CQ_BUSY;
1702                if (cq)
1703                        *cq = r;
1704        }
1705
1706        spin_unlock_irq(mlx4_tlock(dev));
1707
1708        return err;
1709}
1710
1711static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1712                                 enum res_srq_states state, struct res_srq **srq)
1713{
1714        struct mlx4_priv *priv = mlx4_priv(dev);
1715        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1716        struct res_srq *r;
1717        int err = 0;
1718
1719        spin_lock_irq(mlx4_tlock(dev));
1720        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1721        if (!r) {
1722                err = -ENOENT;
1723        } else if (r->com.owner != slave) {
1724                err = -EPERM;
1725        } else if (state == RES_SRQ_ALLOCATED) {
1726                if (r->com.state != RES_SRQ_HW)
1727                        err = -EINVAL;
1728                else if (atomic_read(&r->ref_count))
1729                        err = -EBUSY;
1730        } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1731                err = -EINVAL;
1732        }
1733
1734        if (!err) {
1735                r->com.from_state = r->com.state;
1736                r->com.to_state = state;
1737                r->com.state = RES_SRQ_BUSY;
1738                if (srq)
1739                        *srq = r;
1740        }
1741
1742        spin_unlock_irq(mlx4_tlock(dev));
1743
1744        return err;
1745}
1746
1747static void res_abort_move(struct mlx4_dev *dev, int slave,
1748                           enum mlx4_resource type, int id)
1749{
1750        struct mlx4_priv *priv = mlx4_priv(dev);
1751        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1752        struct res_common *r;
1753
1754        spin_lock_irq(mlx4_tlock(dev));
1755        r = res_tracker_lookup(&tracker->res_tree[type], id);
1756        if (r && (r->owner == slave))
1757                r->state = r->from_state;
1758        spin_unlock_irq(mlx4_tlock(dev));
1759}
1760
1761static void res_end_move(struct mlx4_dev *dev, int slave,
1762                         enum mlx4_resource type, int id)
1763{
1764        struct mlx4_priv *priv = mlx4_priv(dev);
1765        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1766        struct res_common *r;
1767
1768        spin_lock_irq(mlx4_tlock(dev));
1769        r = res_tracker_lookup(&tracker->res_tree[type], id);
1770        if (r && (r->owner == slave))
1771                r->state = r->to_state;
1772        spin_unlock_irq(mlx4_tlock(dev));
1773}
1774
1775static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1776{
1777        return mlx4_is_qp_reserved(dev, qpn) &&
1778                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1779}
1780
1781static int fw_reserved(struct mlx4_dev *dev, int qpn)
1782{
1783        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1784}
1785
1786static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1787                        u64 in_param, u64 *out_param)
1788{
1789        int err;
1790        int count;
1791        int align;
1792        int base;
1793        int qpn;
1794        u8 flags;
1795
1796        switch (op) {
1797        case RES_OP_RESERVE:
1798                count = get_param_l(&in_param) & 0xffffff;
1799                /* Turn off all unsupported QP allocation flags that the
1800                 * slave tries to set.
1801                 */
1802                flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1803                align = get_param_h(&in_param);
1804                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1805                if (err)
1806                        return err;
1807
1808                err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1809                if (err) {
1810                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
1811                        return err;
1812                }
1813
1814                err = add_res_range(dev, slave, base, count, RES_QP, 0);
1815                if (err) {
1816                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
1817                        __mlx4_qp_release_range(dev, base, count);
1818                        return err;
1819                }
1820                set_param_l(out_param, base);
1821                break;
1822        case RES_OP_MAP_ICM:
1823                qpn = get_param_l(&in_param) & 0x7fffff;
1824                if (valid_reserved(dev, slave, qpn)) {
1825                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1826                        if (err)
1827                                return err;
1828                }
1829
1830                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1831                                           NULL, 1);
1832                if (err)
1833                        return err;
1834
1835                if (!fw_reserved(dev, qpn)) {
1836                        err = __mlx4_qp_alloc_icm(dev, qpn);
1837                        if (err) {
1838                                res_abort_move(dev, slave, RES_QP, qpn);
1839                                return err;
1840                        }
1841                }
1842
1843                res_end_move(dev, slave, RES_QP, qpn);
1844                break;
1845
1846        default:
1847                err = -EINVAL;
1848                break;
1849        }
1850        return err;
1851}
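
/*
 * Illustration (assumes get_param_l()/get_param_h() return the low and
 * high 32 bits of a parameter): for RES_OP_RESERVE the 64-bit in_param
 * above is a packed request -- bits 0..23 of the low dword carry the QP
 * count, bits 24..31 the allocation flags (filtered through
 * dev->caps.alloc_res_qp_mask), and the high dword the alignment:
 *
 *	u64 in_param = ((u64)align << 32) |
 *		       ((u64)flags << 24) |
 *		       (count & 0xffffff);
 */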
1852
1853static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1854                         u64 in_param, u64 *out_param)
1855{
1856        int err = -EINVAL;
1857        int base;
1858        int order;
1859
1860        if (op != RES_OP_RESERVE_AND_MAP)
1861                return err;
1862
1863        order = get_param_l(&in_param);
1864
1865        err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1866        if (err)
1867                return err;
1868
1869        base = __mlx4_alloc_mtt_range(dev, order);
1870        if (base == -1) {
1871                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1872                return -ENOMEM;
1873        }
1874
1875        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1876        if (err) {
1877                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1878                __mlx4_free_mtt_range(dev, base, order);
1879        } else {
1880                set_param_l(out_param, base);
1881        }
1882
1883        return err;
1884}
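
/*
 * Illustration: MTT reservations are buddy-style.  The low dword of
 * in_param is an order n; the segment spans 1 << n entries and the
 * slave's quota is charged in the same units, while the tracker keeps a
 * single node keyed by 'base' whose extra field records the order:
 *
 *	order   = get_param_l(&in_param);	// e.g. 4
 *	entries = 1 << order;			// 16 contiguous MTT entries
 */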
1885
1886static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1887                         u64 in_param, u64 *out_param)
1888{
1889        int err = -EINVAL;
1890        int index;
1891        int id;
1892        struct res_mpt *mpt;
1893
1894        switch (op) {
1895        case RES_OP_RESERVE:
1896                err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1897                if (err)
1898                        break;
1899
1900                index = __mlx4_mpt_reserve(dev);
1901                if (index == -1) {
1902                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1903                        break;
1904                }
1905                id = index & mpt_mask(dev);
1906
1907                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1908                if (err) {
1909                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1910                        __mlx4_mpt_release(dev, index);
1911                        break;
1912                }
1913                set_param_l(out_param, index);
1914                break;
1915        case RES_OP_MAP_ICM:
1916                index = get_param_l(&in_param);
1917                id = index & mpt_mask(dev);
1918                err = mr_res_start_move_to(dev, slave, id,
1919                                           RES_MPT_MAPPED, &mpt);
1920                if (err)
1921                        return err;
1922
1923                err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1924                if (err) {
1925                        res_abort_move(dev, slave, RES_MPT, id);
1926                        return err;
1927                }
1928
1929                res_end_move(dev, slave, RES_MPT, id);
1930                break;
1931        }
1932        return err;
1933}
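
/*
 * Illustration: an MPT has two names.  The 'index' exchanged with
 * firmware includes key bits, while the tracker id strips them:
 *
 *	index = __mlx4_mpt_reserve(dev);	// FW-visible handle
 *	id    = index & mpt_mask(dev);		// stable tracker key
 *
 * The rb-tree is keyed by id and the original index is preserved as the
 * node's extra value (the RES_MPT 'key'), which is how mpt_free_res()
 * below recovers it for __mlx4_mpt_release().
 */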
1934
1935static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1936                        u64 in_param, u64 *out_param)
1937{
1938        int cqn;
1939        int err;
1940
1941        switch (op) {
1942        case RES_OP_RESERVE_AND_MAP:
1943                err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1944                if (err)
1945                        break;
1946
1947                err = __mlx4_cq_alloc_icm(dev, &cqn);
1948                if (err) {
1949                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1950                        break;
1951                }
1952
1953                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1954                if (err) {
1955                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1956                        __mlx4_cq_free_icm(dev, cqn);
1957                        break;
1958                }
1959
1960                set_param_l(out_param, cqn);
1961                break;
1962
1963        default:
1964                err = -EINVAL;
1965        }
1966
1967        return err;
1968}
1969
1970static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1971                         u64 in_param, u64 *out_param)
1972{
1973        int srqn;
1974        int err;
1975
1976        switch (op) {
1977        case RES_OP_RESERVE_AND_MAP:
1978                err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1979                if (err)
1980                        break;
1981
1982                err = __mlx4_srq_alloc_icm(dev, &srqn);
1983                if (err) {
1984                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1985                        break;
1986                }
1987
1988                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1989                if (err) {
1990                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1991                        __mlx4_srq_free_icm(dev, srqn);
1992                        break;
1993                }
1994
1995                set_param_l(out_param, srqn);
1996                break;
1997
1998        default:
1999                err = -EINVAL;
2000        }
2001
2002        return err;
2003}
2004
2005static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
2006                                     u8 smac_index, u64 *mac)
2007{
2008        struct mlx4_priv *priv = mlx4_priv(dev);
2009        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2010        struct list_head *mac_list =
2011                &tracker->slave_list[slave].res_list[RES_MAC];
2012        struct mac_res *res, *tmp;
2013
2014        list_for_each_entry_safe(res, tmp, mac_list, list) {
2015                if (res->smac_index == smac_index && res->port == (u8) port) {
2016                        *mac = res->mac;
2017                        return 0;
2018                }
2019        }
2020        return -ENOENT;
2021}
2022
2023static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
2024{
2025        struct mlx4_priv *priv = mlx4_priv(dev);
2026        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2027        struct list_head *mac_list =
2028                &tracker->slave_list[slave].res_list[RES_MAC];
2029        struct mac_res *res, *tmp;
2030
2031        list_for_each_entry_safe(res, tmp, mac_list, list) {
2032                if (res->mac == mac && res->port == (u8) port) {
2033                        /* mac found. update ref count */
2034                        ++res->ref_count;
2035                        return 0;
2036                }
2037        }
2038
2039        if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
2040                return -EINVAL;
2041        res = kzalloc(sizeof(*res), GFP_KERNEL);
2042        if (!res) {
2043                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2044                return -ENOMEM;
2045        }
2046        res->mac = mac;
2047        res->port = (u8) port;
2048        res->smac_index = smac_index;
2049        res->ref_count = 1;
2050        list_add_tail(&res->list,
2051                      &tracker->slave_list[slave].res_list[RES_MAC]);
2052        return 0;
2053}
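
/*
 * Illustration: MACs live on a per-slave ref-counted list rather than in
 * the rb-tree.  Re-registering the same (mac, port) only bumps ref_count,
 * and only the first reference charges the RES_MAC quota, so add/del
 * calls must pair up:
 *
 *	mac_add_to_slave(dev, slave, mac, port, smac_index);	// ref 1, quota
 *	mac_add_to_slave(dev, slave, mac, port, smac_index);	// ref 2
 *	mac_del_from_slave(dev, slave, mac, port);		// ref 1
 *	mac_del_from_slave(dev, slave, mac, port);		// freed, quota back
 */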
2054
2055static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2056                               int port)
2057{
2058        struct mlx4_priv *priv = mlx4_priv(dev);
2059        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2060        struct list_head *mac_list =
2061                &tracker->slave_list[slave].res_list[RES_MAC];
2062        struct mac_res *res, *tmp;
2063
2064        list_for_each_entry_safe(res, tmp, mac_list, list) {
2065                if (res->mac == mac && res->port == (u8) port) {
2066                        if (!--res->ref_count) {
2067                                list_del(&res->list);
2068                                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2069                                kfree(res);
2070                        }
2071                        break;
2072                }
2073        }
2074}
2075
2076static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2077{
2078        struct mlx4_priv *priv = mlx4_priv(dev);
2079        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2080        struct list_head *mac_list =
2081                &tracker->slave_list[slave].res_list[RES_MAC];
2082        struct mac_res *res, *tmp;
2083        int i;
2084
2085        list_for_each_entry_safe(res, tmp, mac_list, list) {
2086                list_del(&res->list);
2087                /* drop one reference for each time the slave registered this MAC */
2088                for (i = 0; i < res->ref_count; i++)
2089                        __mlx4_unregister_mac(dev, res->port, res->mac);
2090                mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2091                kfree(res);
2092        }
2093}
2094
2095static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2096                         u64 in_param, u64 *out_param, int in_port)
2097{
2098        int err = -EINVAL;
2099        int port;
2100        u64 mac;
2101        u8 smac_index;
2102
2103        if (op != RES_OP_RESERVE_AND_MAP)
2104                return err;
2105
2106        port = !in_port ? get_param_l(out_param) : in_port;
2107        port = mlx4_slave_convert_port(dev, slave, port);
2109
2110        if (port < 0)
2111                return -EINVAL;
2112        mac = in_param;
2113
2114        err = __mlx4_register_mac(dev, port, mac);
2115        if (err >= 0) {
2116                smac_index = err;
2117                set_param_l(out_param, err);
2118                err = 0;
2119        }
2120
2121        if (!err) {
2122                err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2123                if (err)
2124                        __mlx4_unregister_mac(dev, port, mac);
2125        }
2126        return err;
2127}
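
/*
 * Illustration: __mlx4_register_mac() overloads its return value -- a
 * negative value is an errno while a value >= 0 is the SMAC index the
 * MAC landed on, which is recorded for the slave and passed back in the
 * low dword of out_param.  Hence the err >= 0 dance above:
 *
 *	err = __mlx4_register_mac(dev, port, mac);
 *	if (err < 0)
 *		return err;		// real error
 *	smac_index = err;		// success: table index
 */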
2128
2129static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2130                             int port, int vlan_index)
2131{
2132        struct mlx4_priv *priv = mlx4_priv(dev);
2133        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2134        struct list_head *vlan_list =
2135                &tracker->slave_list[slave].res_list[RES_VLAN];
2136        struct vlan_res *res, *tmp;
2137
2138        list_for_each_entry_safe(res, tmp, vlan_list, list) {
2139                if (res->vlan == vlan && res->port == (u8) port) {
2140                        /* vlan found. update ref count */
2141                        ++res->ref_count;
2142                        return 0;
2143                }
2144        }
2145
2146        if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2147                return -EINVAL;
2148        res = kzalloc(sizeof(*res), GFP_KERNEL);
2149        if (!res) {
2150                mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2151                return -ENOMEM;
2152        }
2153        res->vlan = vlan;
2154        res->port = (u8) port;
2155        res->vlan_index = vlan_index;
2156        res->ref_count = 1;
2157        list_add_tail(&res->list,
2158                      &tracker->slave_list[slave].res_list[RES_VLAN]);
2159        return 0;
2160}
2161
2163static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2164                                int port)
2165{
2166        struct mlx4_priv *priv = mlx4_priv(dev);
2167        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2168        struct list_head *vlan_list =
2169                &tracker->slave_list[slave].res_list[RES_VLAN];
2170        struct vlan_res *res, *tmp;
2171
2172        list_for_each_entry_safe(res, tmp, vlan_list, list) {
2173                if (res->vlan == vlan && res->port == (u8) port) {
2174                        if (!--res->ref_count) {
2175                                list_del(&res->list);
2176                                mlx4_release_resource(dev, slave, RES_VLAN,
2177                                                      1, port);
2178                                kfree(res);
2179                        }
2180                        break;
2181                }
2182        }
2183}
2184
2185static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2186{
2187        struct mlx4_priv *priv = mlx4_priv(dev);
2188        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2189        struct list_head *vlan_list =
2190                &tracker->slave_list[slave].res_list[RES_VLAN];
2191        struct vlan_res *res, *tmp;
2192        int i;
2193
2194        list_for_each_entry_safe(res, tmp, vlan_list, list) {
2195                list_del(&res->list);
2196                /* drop one reference for each time the slave registered this VLAN */
2197                for (i = 0; i < res->ref_count; i++)
2198                        __mlx4_unregister_vlan(dev, res->port, res->vlan);
2199                mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2200                kfree(res);
2201        }
2202}
2203
2204static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2205                          u64 in_param, u64 *out_param, int in_port)
2206{
2207        struct mlx4_priv *priv = mlx4_priv(dev);
2208        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2209        int err;
2210        u16 vlan;
2211        int vlan_index;
2212        int port;
2213
2214        port = !in_port ? get_param_l(out_param) : in_port;
2215
2216        if (!port || op != RES_OP_RESERVE_AND_MAP)
2217                return -EINVAL;
2218
2219        port = mlx4_slave_convert_port(dev, slave, port);
2221
2222        if (port < 0)
2223                return -EINVAL;
2224        /* Older kernels made reg/unreg VLAN a NOP for this case; keep that behavior. */
2225        if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2226                slave_state[slave].old_vlan_api = true;
2227                return 0;
2228        }
2229
2230        vlan = (u16) in_param;
2231
2232        err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2233        if (!err) {
2234                set_param_l(out_param, (u32) vlan_index);
2235                err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2236                if (err)
2237                        __mlx4_unregister_vlan(dev, port, vlan);
2238        }
2239        return err;
2240}
2241
2242static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2243                             u64 in_param, u64 *out_param, int port)
2244{
2245        u32 index;
2246        int err;
2247
2248        if (op != RES_OP_RESERVE)
2249                return -EINVAL;
2250
2251        err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2252        if (err)
2253                return err;
2254
2255        err = __mlx4_counter_alloc(dev, &index);
2256        if (err) {
2257                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2258                return err;
2259        }
2260
2261        err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2262        if (err) {
2263                __mlx4_counter_free(dev, index);
2264                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2265        } else {
2266                set_param_l(out_param, index);
2267        }
2268
2269        return err;
2270}
2271
2272static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2273                           u64 in_param, u64 *out_param)
2274{
2275        u32 xrcdn;
2276        int err;
2277
2278        if (op != RES_OP_RESERVE)
2279                return -EINVAL;
2280
2281        err = __mlx4_xrcd_alloc(dev, &xrcdn);
2282        if (err)
2283                return err;
2284
2285        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2286        if (err)
2287                __mlx4_xrcd_free(dev, xrcdn);
2288        else
2289                set_param_l(out_param, xrcdn);
2290
2291        return err;
2292}
2293
2294int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2295                           struct mlx4_vhcr *vhcr,
2296                           struct mlx4_cmd_mailbox *inbox,
2297                           struct mlx4_cmd_mailbox *outbox,
2298                           struct mlx4_cmd_info *cmd)
2299{
2300        int err;
2301        int alop = vhcr->op_modifier;
2302
2303        switch (vhcr->in_modifier & 0xFF) {
2304        case RES_QP:
2305                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2306                                   vhcr->in_param, &vhcr->out_param);
2307                break;
2308
2309        case RES_MTT:
2310                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2311                                    vhcr->in_param, &vhcr->out_param);
2312                break;
2313
2314        case RES_MPT:
2315                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2316                                    vhcr->in_param, &vhcr->out_param);
2317                break;
2318
2319        case RES_CQ:
2320                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2321                                   vhcr->in_param, &vhcr->out_param);
2322                break;
2323
2324        case RES_SRQ:
2325                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2326                                    vhcr->in_param, &vhcr->out_param);
2327                break;
2328
2329        case RES_MAC:
2330                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2331                                    vhcr->in_param, &vhcr->out_param,
2332                                    (vhcr->in_modifier >> 8) & 0xFF);
2333                break;
2334
2335        case RES_VLAN:
2336                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2337                                     vhcr->in_param, &vhcr->out_param,
2338                                     (vhcr->in_modifier >> 8) & 0xFF);
2339                break;
2340
2341        case RES_COUNTER:
2342                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2343                                        vhcr->in_param, &vhcr->out_param, 0);
2344                break;
2345
2346        case RES_XRCD:
2347                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2348                                      vhcr->in_param, &vhcr->out_param);
2349                break;
2350
2351        default:
2352                err = -EINVAL;
2353                break;
2354        }
2355
2356        return err;
2357}
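
/*
 * Illustration: the VHCR in_modifier multiplexes ALLOC_RES/FREE_RES --
 * bits 0..7 select the resource type and bits 8..15 carry the port for
 * the port-scoped resources (MAC and VLAN), while op_modifier picks the
 * operation (RES_OP_RESERVE, RES_OP_RESERVE_AND_MAP or RES_OP_MAP_ICM):
 *
 *	type = vhcr->in_modifier & 0xFF;
 *	port = (vhcr->in_modifier >> 8) & 0xFF;	// 0 when not port-scoped
 */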
2358
2359static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2360                       u64 in_param)
2361{
2362        int err;
2363        int count;
2364        int base;
2365        int qpn;
2366
2367        switch (op) {
2368        case RES_OP_RESERVE:
2369                base = get_param_l(&in_param) & 0x7fffff;
2370                count = get_param_h(&in_param);
2371                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2372                if (err)
2373                        break;
2374                mlx4_release_resource(dev, slave, RES_QP, count, 0);
2375                __mlx4_qp_release_range(dev, base, count);
2376                break;
2377        case RES_OP_MAP_ICM:
2378                qpn = get_param_l(&in_param) & 0x7fffff;
2379                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2380                                           NULL, 0);
2381                if (err)
2382                        return err;
2383
2384                if (!fw_reserved(dev, qpn))
2385                        __mlx4_qp_free_icm(dev, qpn);
2386
2387                res_end_move(dev, slave, RES_QP, qpn);
2388
2389                if (valid_reserved(dev, slave, qpn))
2390                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2391                break;
2392        default:
2393                err = -EINVAL;
2394                break;
2395        }
2396        return err;
2397}
2398
2399static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2400                        u64 in_param, u64 *out_param)
2401{
2402        int err = -EINVAL;
2403        int base;
2404        int order;
2405
2406        if (op != RES_OP_RESERVE_AND_MAP)
2407                return err;
2408
2409        base = get_param_l(&in_param);
2410        order = get_param_h(&in_param);
2411        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2412        if (!err) {
2413                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2414                __mlx4_free_mtt_range(dev, base, order);
2415        }
2416        return err;
2417}
2418
2419static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2420                        u64 in_param)
2421{
2422        int err = -EINVAL;
2423        int index;
2424        int id;
2425        struct res_mpt *mpt;
2426
2427        switch (op) {
2428        case RES_OP_RESERVE:
2429                index = get_param_l(&in_param);
2430                id = index & mpt_mask(dev);
2431                err = get_res(dev, slave, id, RES_MPT, &mpt);
2432                if (err)
2433                        break;
2434                index = mpt->key;
2435                put_res(dev, slave, id, RES_MPT);
2436
2437                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2438                if (err)
2439                        break;
2440                mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2441                __mlx4_mpt_release(dev, index);
2442                break;
2443        case RES_OP_MAP_ICM:
2444                index = get_param_l(&in_param);
2445                id = index & mpt_mask(dev);
2446                err = mr_res_start_move_to(dev, slave, id,
2447                                           RES_MPT_RESERVED, &mpt);
2448                if (err)
2449                        return err;
2450
2451                __mlx4_mpt_free_icm(dev, mpt->key);
2452                res_end_move(dev, slave, RES_MPT, id);
2453                break;
2454        default:
2455                err = -EINVAL;
2456                break;
2457        }
2458        return err;
2459}
2460
2461static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2462                       u64 in_param, u64 *out_param)
2463{
2464        int cqn;
2465        int err;
2466
2467        switch (op) {
2468        case RES_OP_RESERVE_AND_MAP:
2469                cqn = get_param_l(&in_param);
2470                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2471                if (err)
2472                        break;
2473
2474                mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2475                __mlx4_cq_free_icm(dev, cqn);
2476                break;
2477
2478        default:
2479                err = -EINVAL;
2480                break;
2481        }
2482
2483        return err;
2484}
2485
2486static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2487                        u64 in_param, u64 *out_param)
2488{
2489        int srqn;
2490        int err;
2491
2492        switch (op) {
2493        case RES_OP_RESERVE_AND_MAP:
2494                srqn = get_param_l(&in_param);
2495                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2496                if (err)
2497                        break;
2498
2499                mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2500                __mlx4_srq_free_icm(dev, srqn);
2501                break;
2502
2503        default:
2504                err = -EINVAL;
2505                break;
2506        }
2507
2508        return err;
2509}
2510
2511static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2512                            u64 in_param, u64 *out_param, int in_port)
2513{
2514        int port;
2515        int err = 0;
2516
2517        switch (op) {
2518        case RES_OP_RESERVE_AND_MAP:
2519                port = !in_port ? get_param_l(out_param) : in_port;
2520                port = mlx4_slave_convert_port(dev, slave, port);
2522
2523                if (port < 0)
2524                        return -EINVAL;
2525                mac_del_from_slave(dev, slave, in_param, port);
2526                __mlx4_unregister_mac(dev, port, in_param);
2527                break;
2528        default:
2529                err = -EINVAL;
2530                break;
2531        }
2532
2533        return err;
2534}
2536
2537static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2538                            u64 in_param, u64 *out_param, int port)
2539{
2540        struct mlx4_priv *priv = mlx4_priv(dev);
2541        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2542        int err = 0;
2543
2544        port = mlx4_slave_convert_port(dev, slave, port);
2546
2547        if (port < 0)
2548                return -EINVAL;
2549        switch (op) {
2550        case RES_OP_RESERVE_AND_MAP:
2551                if (slave_state[slave].old_vlan_api)
2552                        return 0;
2553                if (!port)
2554                        return -EINVAL;
2555                vlan_del_from_slave(dev, slave, in_param, port);
2556                __mlx4_unregister_vlan(dev, port, in_param);
2557                break;
2558        default:
2559                err = -EINVAL;
2560                break;
2561        }
2562
2563        return err;
2564}
2565
2566static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2567                            u64 in_param, u64 *out_param)
2568{
2569        int index;
2570        int err;
2571
2572        if (op != RES_OP_RESERVE)
2573                return -EINVAL;
2574
2575        index = get_param_l(&in_param);
2576        if (index == MLX4_SINK_COUNTER_INDEX(dev))
2577                return 0;
2578
2579        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2580        if (err)
2581                return err;
2582
2583        __mlx4_counter_free(dev, index);
2584        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2585
2586        return err;
2587}
2588
2589static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2590                          u64 in_param, u64 *out_param)
2591{
2592        int xrcdn;
2593        int err;
2594
2595        if (op != RES_OP_RESERVE)
2596                return -EINVAL;
2597
2598        xrcdn = get_param_l(&in_param);
2599        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2600        if (err)
2601                return err;
2602
2603        __mlx4_xrcd_free(dev, xrcdn);
2604
2605        return err;
2606}
2607
2608int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2609                          struct mlx4_vhcr *vhcr,
2610                          struct mlx4_cmd_mailbox *inbox,
2611                          struct mlx4_cmd_mailbox *outbox,
2612                          struct mlx4_cmd_info *cmd)
2613{
2614        int err = -EINVAL;
2615        int alop = vhcr->op_modifier;
2616
2617        switch (vhcr->in_modifier & 0xFF) {
2618        case RES_QP:
2619                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2620                                  vhcr->in_param);
2621                break;
2622
2623        case RES_MTT:
2624                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2625                                   vhcr->in_param, &vhcr->out_param);
2626                break;
2627
2628        case RES_MPT:
2629                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2630                                   vhcr->in_param);
2631                break;
2632
2633        case RES_CQ:
2634                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2635                                  vhcr->in_param, &vhcr->out_param);
2636                break;
2637
2638        case RES_SRQ:
2639                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2640                                   vhcr->in_param, &vhcr->out_param);
2641                break;
2642
2643        case RES_MAC:
2644                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2645                                   vhcr->in_param, &vhcr->out_param,
2646                                   (vhcr->in_modifier >> 8) & 0xFF);
2647                break;
2648
2649        case RES_VLAN:
2650                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2651                                    vhcr->in_param, &vhcr->out_param,
2652                                    (vhcr->in_modifier >> 8) & 0xFF);
2653                break;
2654
2655        case RES_COUNTER:
2656                err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2657                                       vhcr->in_param, &vhcr->out_param);
2658                break;
2659
2660        case RES_XRCD:
2661                err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2662                                     vhcr->in_param, &vhcr->out_param);
2663                break;
2664        default:
2665                break;
2666        }
2667        return err;
2668}
2669
2670/* ugly but other choices are uglier */
2671static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2672{
2673        return (be32_to_cpu(mpt->flags) >> 9) & 1;
2674}
2675
2676static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2677{
2678        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2679}
2680
2681static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2682{
2683        return be32_to_cpu(mpt->mtt_sz);
2684}
2685
2686static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2687{
2688        return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2689}
2690
2691static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2692{
2693        return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2694}
2695
2696static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2697{
2698        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2699}
2700
2701static int mr_is_region(struct mlx4_mpt_entry *mpt)
2702{
2703        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2704}
2705
2706static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2707{
2708        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2709}
2710
2711static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2712{
2713        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2714}
2715
2716static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2717{
2718        int page_shift = (qpc->log_page_size & 0x3f) + 12;
2719        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2720        int log_sq_stride = qpc->sq_size_stride & 7;
2721        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2722        int log_rq_stride = qpc->rq_size_stride & 7;
2723        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2724        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2725        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2726        int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2727        int sq_size;
2728        int rq_size;
2729        int total_pages;
2730        int total_mem;
2731        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2732        int tot;
2733
2734        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2735        rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2736        total_mem = sq_size + rq_size;
2737        tot = (total_mem + (page_offset << 6)) >> page_shift;
2738        total_pages = !tot ? 1 : roundup_pow_of_two(tot);
2739
2740        return total_pages;
2741}
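
/*
 * Worked example for qp_get_mtt_size(): a work queue of 2^log_size
 * entries with a stride of 2^(log_stride + 4) bytes occupies
 * 1 << (log_size + log_stride + 4) bytes, and page_offset is counted in
 * 64-byte units (hence the << 6).  With log_sq_size = 10,
 * log_sq_stride = 2, the same RQ sizes, no SRQ/RSS/XRC, 4KB pages
 * (page_shift = 12) and page_offset = 0:
 *
 *	sq_size = 1 << (10 + 2 + 4) = 64KB
 *	rq_size = 1 << (10 + 2 + 4) = 64KB
 *	tot     = (64KB + 64KB) >> 12 = 32	// already a power of two
 *
 * so the QP needs 32 MTT entries.
 */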
2742
2743static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2744                           int size, struct res_mtt *mtt)
2745{
2746        int res_start = mtt->com.res_id;
2747        int res_size = (1 << mtt->order);
2748
2749        if (start < res_start || start + size > res_start + res_size)
2750                return -EPERM;
2751        return 0;
2752}
2753
2754int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2755                           struct mlx4_vhcr *vhcr,
2756                           struct mlx4_cmd_mailbox *inbox,
2757                           struct mlx4_cmd_mailbox *outbox,
2758                           struct mlx4_cmd_info *cmd)
2759{
2760        int err;
2761        int index = vhcr->in_modifier;
2762        struct res_mtt *mtt;
2763        struct res_mpt *mpt = NULL;
2764        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2765        int phys;
2766        int id;
2767        u32 pd;
2768        int pd_slave;
2769
2770        id = index & mpt_mask(dev);
2771        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2772        if (err)
2773                return err;
2774
2775        /* Disable memory windows for VFs. */
2776        if (!mr_is_region(inbox->buf)) {
2777                err = -EPERM;
2778                goto ex_abort;
2779        }
2780
2781        /* Make sure that the PD bits related to the slave id are zeros. */
2782        pd = mr_get_pd(inbox->buf);
2783        pd_slave = (pd >> 17) & 0x7f;
2784        if (pd_slave != 0 && --pd_slave != slave) {
2785                err = -EPERM;
2786                goto ex_abort;
2787        }
2788
2789        if (mr_is_fmr(inbox->buf)) {
2790                /* FMR and Bind Enable are forbidden in slave devices. */
2791                if (mr_is_bind_enabled(inbox->buf)) {
2792                        err = -EPERM;
2793                        goto ex_abort;
2794                }
2795                /* FMR and Memory Windows are also forbidden. */
2796                if (!mr_is_region(inbox->buf)) {
2797                        err = -EPERM;
2798                        goto ex_abort;
2799                }
2800        }
2801
2802        phys = mr_phys_mpt(inbox->buf);
2803        if (!phys) {
2804                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2805                if (err)
2806                        goto ex_abort;
2807
2808                err = check_mtt_range(dev, slave, mtt_base,
2809                                      mr_get_mtt_size(inbox->buf), mtt);
2810                if (err)
2811                        goto ex_put;
2812
2813                mpt->mtt = mtt;
2814        }
2815
2816        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2817        if (err)
2818                goto ex_put;
2819
2820        if (!phys) {
2821                atomic_inc(&mtt->ref_count);
2822                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2823        }
2824
2825        res_end_move(dev, slave, RES_MPT, id);
2826        return 0;
2827
2828ex_put:
2829        if (!phys)
2830                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2831ex_abort:
2832        res_abort_move(dev, slave, RES_MPT, id);
2833
2834        return err;
2835}
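
/*
 * Illustration: the PD field checked above embeds the owning function in
 * bits 17..23, stored as slave + 1 so that a value of 0 means "no slave
 * encoded".  That is why pd_slave == 0 is accepted and otherwise
 * pd_slave - 1 must equal the calling slave:
 *
 *	pd       = mr_get_pd(inbox->buf);	// low 24 bits of pd_flags
 *	pd_slave = (pd >> 17) & 0x7f;		// 7-bit function field
 */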
2836
2837int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2838                           struct mlx4_vhcr *vhcr,
2839                           struct mlx4_cmd_mailbox *inbox,
2840                           struct mlx4_cmd_mailbox *outbox,
2841                           struct mlx4_cmd_info *cmd)
2842{
2843        int err;
2844        int index = vhcr->in_modifier;
2845        struct res_mpt *mpt;
2846        int id;
2847
2848        id = index & mpt_mask(dev);
2849        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2850        if (err)
2851                return err;
2852
2853        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2854        if (err)
2855                goto ex_abort;
2856
2857        if (mpt->mtt)
2858                atomic_dec(&mpt->mtt->ref_count);
2859
2860        res_end_move(dev, slave, RES_MPT, id);
2861        return 0;
2862
2863ex_abort:
2864        res_abort_move(dev, slave, RES_MPT, id);
2865
2866        return err;
2867}
2868
2869int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2870                           struct mlx4_vhcr *vhcr,
2871                           struct mlx4_cmd_mailbox *inbox,
2872                           struct mlx4_cmd_mailbox *outbox,
2873                           struct mlx4_cmd_info *cmd)
2874{
2875        int err;
2876        int index = vhcr->in_modifier;
2877        struct res_mpt *mpt;
2878        int id;
2879
2880        id = index & mpt_mask(dev);
2881        err = get_res(dev, slave, id, RES_MPT, &mpt);
2882        if (err)
2883                return err;
2884
2885        if (mpt->com.from_state == RES_MPT_MAPPED) {
2886                /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2887                 * that, the VF must read the MPT. But since the MPT entry memory is not
2888                 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2889                 * entry contents. To guarantee that the MPT cannot be changed, the driver
2890                 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2891                 * ownership following the change. The change here allows the VF to
2892                 * perform QUERY_MPT also when the entry is in SW ownership.
2893                 */
2894                struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2895                                        &mlx4_priv(dev)->mr_table.dmpt_table,
2896                                        mpt->key, NULL);
2897
2898                if (!mpt_entry || !outbox->buf) {
2899                        err = -EINVAL;
2900                        goto out;
2901                }
2902
2903                memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2904
2905                err = 0;
2906        } else if (mpt->com.from_state == RES_MPT_HW) {
2907                err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2908        } else {
2909                err = -EBUSY;
2910                goto out;
2911        }
2912
2914out:
2915        put_res(dev, slave, id, RES_MPT);
2916        return err;
2917}
2918
2919static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2920{
2921        return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2922}
2923
2924static int qp_get_scqn(struct mlx4_qp_context *qpc)
2925{
2926        return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2927}
2928
2929static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2930{
2931        return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2932}
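
/*
 * Illustration: qpc->srqn packs two fields -- bits 0..23 are the SRQ
 * number and bit 24 flags that the QP actually uses an SRQ, which is why
 * mlx4_RST2INIT_QP_wrapper() below splits qp_get_srqn()'s result:
 *
 *	srqn    = qp_get_srqn(qpc) & 0xffffff;	// SRQ number
 *	use_srq = (qp_get_srqn(qpc) >> 24) & 1;	// "uses SRQ" bit
 */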
2933
2934static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2935                                  struct mlx4_qp_context *context)
2936{
2937        u32 qpn = vhcr->in_modifier & 0xffffff;
2938        u32 qkey = 0;
2939
2940        if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2941                return;
2942
2943        /* adjust qkey in qp context */
2944        context->qkey = cpu_to_be32(qkey);
2945}
2946
2947static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2948                                 struct mlx4_qp_context *qpc,
2949                                 struct mlx4_cmd_mailbox *inbox);
2950
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
                             struct mlx4_cmd_mailbox *outbox,
                             struct mlx4_cmd_info *cmd)
{
        int err;
        int qpn = vhcr->in_modifier & 0x7fffff;
        struct res_mtt *mtt;
        struct res_qp *qp;
        struct mlx4_qp_context *qpc = inbox->buf + 8;
        int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
        int mtt_size = qp_get_mtt_size(qpc);
        struct res_cq *rcq;
        struct res_cq *scq;
        int rcqn = qp_get_rcqn(qpc);
        int scqn = qp_get_scqn(qpc);
        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
        struct res_srq *srq;
        int local_qpn = vhcr->in_modifier & 0xffffff;

        err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
        if (err)
                return err;

        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
        if (err)
                return err;
        qp->local_qpn = local_qpn;
        qp->sched_queue = 0;
        qp->param3 = 0;
        qp->vlan_control = 0;
        qp->fvl_rx = 0;
        qp->pri_path_fl = 0;
        qp->vlan_index = 0;
        qp->feup = 0;
        qp->qpc_flags = be32_to_cpu(qpc->flags);

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
        if (err)
                goto ex_abort;

        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
        if (err)
                goto ex_put_mtt;

        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
        if (err)
                goto ex_put_mtt;

        if (scqn != rcqn) {
                err = get_res(dev, slave, scqn, RES_CQ, &scq);
                if (err)
                        goto ex_put_rcq;
        } else {
                scq = rcq;
        }

        if (use_srq) {
                err = get_res(dev, slave, srqn, RES_SRQ, &srq);
                if (err)
                        goto ex_put_scq;
        }

        adjust_proxy_tun_qkey(dev, vhcr, qpc);
        update_pkey_index(dev, slave, inbox);
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put_srq;
        atomic_inc(&mtt->ref_count);
        qp->mtt = mtt;
        atomic_inc(&rcq->ref_count);
        qp->rcq = rcq;
        atomic_inc(&scq->ref_count);
        qp->scq = scq;

        if (scqn != rcqn)
                put_res(dev, slave, scqn, RES_CQ);

        if (use_srq) {
                atomic_inc(&srq->ref_count);
                put_res(dev, slave, srqn, RES_SRQ);
                qp->srq = srq;
        }

        /* Save param3 for dynamic changes from VST back to VGT */
        qp->param3 = qpc->param3;
        put_res(dev, slave, rcqn, RES_CQ);
        put_res(dev, slave, mtt_base, RES_MTT);
        res_end_move(dev, slave, RES_QP, qpn);

        return 0;

ex_put_srq:
        if (use_srq)
                put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
        if (scqn != rcqn)
                put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
        put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
        put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
        res_abort_move(dev, slave, RES_QP, qpn);

        return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
        return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
        int log_eq_size = eqc->log_eq_size & 0x1f;
        int page_shift = (eqc->log_page_size & 0x3f) + 12;

        if (log_eq_size + 5 < page_shift)
                return 1;

        return 1 << (log_eq_size + 5 - page_shift);
}
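
/* Worked example for the size computation above: EQEs are 32 bytes, so an
 * EQ with 2^log_eq_size entries occupies 2^(log_eq_size + 5) bytes.  With
 * log_eq_size = 10 (1024 EQEs = 32KB) and 4KB pages (page_shift = 12),
 * that is 1 << (10 + 5 - 12) = 8 MTT entries, with a minimum of one page.
 * cq_get_mtt_size() below uses the same 32-byte-entry arithmetic for CQEs.
 */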

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
        return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
        int page_shift = (cqc->log_page_size & 0x3f) + 12;

        if (log_cq_size + 5 < page_shift)
                return 1;

        return 1 << (log_cq_size + 5 - page_shift);
}

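/* EQ numbers are only unique per slave, so the tracker keys EQs by a
 * composite ID: (slave << 10) | eqn.  All of the EQ wrappers below build
 * res_id the same way before touching the tracker.
 */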
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int err;
        int eqn = vhcr->in_modifier;
        int res_id = (slave << 10) | eqn;
        struct mlx4_eq_context *eqc = inbox->buf;
        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
        int mtt_size = eq_get_mtt_size(eqc);
        struct res_eq *eq;
        struct res_mtt *mtt;

        err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
        if (err)
                return err;
        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
        if (err)
                goto out_add;

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
        if (err)
                goto out_move;

        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
        if (err)
                goto out_put;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto out_put;

        atomic_inc(&mtt->ref_count);
        eq->mtt = mtt;
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_EQ, res_id);
        return 0;

out_put:
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
        res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
        return err;
}

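/* Slaves may only read the device configuration (op_modifier 1, the "get"
 * form); any attempt to set it is rejected with -EPERM before it reaches
 * the firmware.
 */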
int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
{
        int err;
        u8 get = vhcr->op_modifier;

        if (get != 1)
                return -EPERM;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

        return err;
}

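/* Find the registered MTT range belonging to this slave that fully
 * contains [start, start + len).  On success the range is marked
 * RES_MTT_BUSY under the tracker lock, so the caller must release it
 * with put_res() when done.
 */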
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
                              int len, struct res_mtt **res)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mtt *mtt;
        int err = -EINVAL;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
                            com.list) {
                if (!check_mtt_range(dev, slave, start, len, mtt)) {
                        *res = mtt;
                        mtt->com.from_state = mtt->com.state;
                        mtt->com.state = RES_MTT_BUSY;
                        err = 0;
                        break;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

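/* Sanity-check a QP transition command coming from a slave.  For VFs this
 * clears the FPP bit, rejects rate-limit settings, bounds-checks the
 * primary/alternate mgid_index against the number of GIDs the VF actually
 * owns on that port, and refuses MLX proxy special QPs unless SMI was
 * explicitly enabled for the VF on that port.
 */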
static int verify_qp_parameters(struct mlx4_dev *dev,
                                struct mlx4_vhcr *vhcr,
                                struct mlx4_cmd_mailbox *inbox,
                                enum qp_transition transition, u8 slave)
{
        u32                     qp_type;
        u32                     qpn;
        struct mlx4_qp_context  *qp_ctx;
        enum mlx4_qp_optpar     optpar;
        int port;
        int num_gids;

        qp_ctx  = inbox->buf + 8;
        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        optpar  = be32_to_cpu(*(__be32 *) inbox->buf);

        if (slave != mlx4_master_func_num(dev)) {
                qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
                /* setting QP rate-limit is disallowed for VFs */
                if (qp_ctx->rate_limit_params)
                        return -EPERM;
        }

        switch (qp_type) {
        case MLX4_QP_ST_RC:
        case MLX4_QP_ST_XRC:
        case MLX4_QP_ST_UC:
                switch (transition) {
                case QP_TRANS_INIT2RTR:
                case QP_TRANS_RTR2RTS:
                case QP_TRANS_RTS2RTS:
                case QP_TRANS_SQD2SQD:
                case QP_TRANS_SQD2RTS:
                        if (slave != mlx4_master_func_num(dev)) {
                                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                                        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
                                                num_gids = mlx4_get_slave_num_gids(dev, slave, port);
                                        else
                                                num_gids = 1;
                                        if (qp_ctx->pri_path.mgid_index >= num_gids)
                                                return -EINVAL;
                                }
                                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                                        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
                                                num_gids = mlx4_get_slave_num_gids(dev, slave, port);
                                        else
                                                num_gids = 1;
                                        if (qp_ctx->alt_path.mgid_index >= num_gids)
                                                return -EINVAL;
                                }
                        }
                        break;
                default:
                        break;
                }
                break;

        case MLX4_QP_ST_MLX:
                qpn = vhcr->in_modifier & 0x7fffff;
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (transition == QP_TRANS_INIT2RTR &&
                    slave != mlx4_master_func_num(dev) &&
                    mlx4_is_qp_reserved(dev, qpn) &&
                    !mlx4_vf_smi_enabled(dev, slave, port)) {
                        /* only enabled VFs may create MLX proxy QPs */
                        mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
                                 __func__, slave, port);
                        return -EPERM;
                }
                break;

        default:
                break;
        }

        return 0;
}

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        struct mlx4_mtt mtt;
        __be64 *page_list = inbox->buf;
        u64 *pg_list = (u64 *)page_list;
        int i;
        struct res_mtt *rmtt = NULL;
        int start = be64_to_cpu(page_list[0]);
        int npages = vhcr->in_modifier;
        int err;

        err = get_containing_mtt(dev, slave, start, npages, &rmtt);
        if (err)
                return err;

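        /* WRITE_MTT inbox layout: 64-bit word 0 holds the starting MTT
         * offset, word 1 is not used here, and the page addresses follow
         * from word 2 on, each with bit 0 serving as the "present" flag
         * (masked off below before handing the list to __mlx4_write_mtt()).
         */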
        /* Call the SW implementation of write_mtt:
         * - Prepare a dummy mtt struct
         * - Translate inbox contents to simple addresses in host endianness */
        mtt.offset = 0;  /* TBD: the offset is left unhandled; acceptable
                            for now because nothing here consumes it */
        mtt.order = 0;
        mtt.page_shift = 0;
        for (i = 0; i < npages; ++i)
                pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

        err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
                               ((u64 *)page_list + 2));

        if (rmtt)
                put_res(dev, slave, rmtt->com.res_id, RES_MTT);

        return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int eqn = vhcr->in_modifier;
        int res_id = eqn | (slave << 10);
        struct res_eq *eq;
        int err;

        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
        if (err)
                return err;

        err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
        if (err)
                goto ex_abort;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put;

        atomic_dec(&eq->mtt->ref_count);
        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_EQ, res_id);
        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

        return 0;

ex_put:
        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
        res_abort_move(dev, slave, RES_EQ, res_id);

        return err;
}

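/* mlx4_GEN_EQE - forward an event to a slave by asking the firmware to
 * generate an EQE on the event queue the slave registered for this event
 * type.  Silently succeeds (returns 0) if the slave is out of range,
 * inactive, is the PF itself, or never registered an EQ, since there is
 * then nobody to notify.
 */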
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq_info *event_eq;
        struct mlx4_cmd_mailbox *mailbox;
        u32 in_modifier = 0;
        int err;
        int res_id;
        struct res_eq *req;

        if (!priv->mfunc.master.slave_state)
                return -EINVAL;

        /* check for slave valid, slave not PF, and slave active */
        if (slave < 0 || slave > dev->persist->num_vfs ||
            slave == dev->caps.function ||
            !priv->mfunc.master.slave_state[slave].active)
                return 0;

        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

        /* Create the event only if the slave is registered */
        if (event_eq->eqn < 0)
                return 0;

        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
        res_id = (slave << 10) | event_eq->eqn;
        err = get_res(dev, slave, res_id, RES_EQ, &req);
        if (err)
                goto unlock;

        if (req->com.from_state != RES_EQ_HW) {
                err = -EINVAL;
                goto put;
        }

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto put;
        }

        if (eqe->type == MLX4_EVENT_TYPE_CMD) {
                ++event_eq->token;
                eqe->event.cmd.token = cpu_to_be16(event_eq->token);
        }

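        /* Copy only the first 28 bytes of the 32-byte EQE: the trailing
         * bytes include the ownership bit, which is managed by the EQ
         * machinery rather than taken from the caller's event.
         */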
        memcpy(mailbox->buf, (u8 *) eqe, 28);

        in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);

        err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
                       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
                       MLX4_CMD_NATIVE);

        put_res(dev, slave, res_id, RES_EQ);
        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;

put:
        put_res(dev, slave, res_id, RES_EQ);

unlock:
        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
        return err;
}

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int eqn = vhcr->in_modifier;
        int res_id = eqn | (slave << 10);
        struct res_eq *eq;
        int err;

        err = get_res(dev, slave, res_id, RES_EQ, &eq);
        if (err)
                return err;

        if (eq->com.from_state != RES_EQ_HW) {
                err = -EINVAL;
                goto ex_put;
        }

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
        put_res(dev, slave, res_id, RES_EQ);
        return err;
}

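/* The CQ wrappers mirror the EQ ones: SW2HW moves the CQ into the HW
 * state and takes a reference on its MTT range; HW2SW drops that
 * reference on the way back to RES_CQ_ALLOCATED.  The refcount is what
 * keeps a busy MTT range from being unregistered underneath a live CQ.
 */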
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int err;
        int cqn = vhcr->in_modifier;
        struct mlx4_cq_context *cqc = inbox->buf;
        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
        struct res_cq *cq = NULL;
        struct res_mtt *mtt;

        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
        if (err)
                return err;
        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
        if (err)
                goto out_move;
        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
        if (err)
                goto out_put;
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto out_put;
        atomic_inc(&mtt->ref_count);
        cq->mtt = mtt;
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_CQ, cqn);
        return 0;

out_put:
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
        res_abort_move(dev, slave, RES_CQ, cqn);
        return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int err;
        int cqn = vhcr->in_modifier;
        struct res_cq *cq = NULL;

        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
        if (err)
                return err;
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto out_move;
        atomic_dec(&cq->mtt->ref_count);
        res_end_move(dev, slave, RES_CQ, cqn);
        return 0;

out_move:
        res_abort_move(dev, slave, RES_CQ, cqn);
        return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int cqn = vhcr->in_modifier;
        struct res_cq *cq;
        int err;

        err = get_res(dev, slave, cqn, RES_CQ, &cq);
        if (err)
                return err;

        if (cq->com.from_state != RES_CQ_HW) {
                err = -EINVAL;
                goto ex_put;
        }

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
        put_res(dev, slave, cqn, RES_CQ);

        return err;
}

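/* MODIFY_CQ with op_modifier 0 is a resize: the CQ moves to a new buffer,
 * and therefore to a new MTT range.  The old range is released and the
 * new one pinned only after the firmware accepts the command, so a failed
 * resize leaves the original reference intact.
 */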
static int handle_resize(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
                         struct mlx4_cmd_mailbox *outbox,
                         struct mlx4_cmd_info *cmd,
                         struct res_cq *cq)
{
        int err;
        struct res_mtt *orig_mtt;
        struct res_mtt *mtt;
        struct mlx4_cq_context *cqc = inbox->buf;
        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

        err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
        if (err)
                return err;

        if (orig_mtt != cq->mtt) {
                err = -EINVAL;
                goto ex_put;
        }

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
        if (err)
                goto ex_put;

        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
        if (err)
                goto ex_put1;
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put1;
        atomic_dec(&orig_mtt->ref_count);
        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
        atomic_inc(&mtt->ref_count);
        cq->mtt = mtt;
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
        return 0;

ex_put1:
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

        return err;
}

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int cqn = vhcr->in_modifier;
        struct res_cq *cq;
        int err;

        err = get_res(dev, slave, cqn, RES_CQ, &cq);
        if (err)
                return err;

        if (cq->com.from_state != RES_CQ_HW) {
                err = -EINVAL;
                goto ex_put;
        }

        if (vhcr->op_modifier == 0) {
                err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
                goto ex_put;
        }

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
        put_res(dev, slave, cqn, RES_CQ);

        return err;
}

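/* SRQ buffer size is 2^log_srq_size WQEs of 2^(log_rq_stride + 4) bytes
 * each (the stride is encoded in 16-byte units).  For example,
 * log_srq_size = 10 and log_rq_stride = 2 (64-byte WQEs) give a 64KB
 * buffer, i.e. 1 << (10 + 2 + 4 - 12) = 16 MTT entries with 4KB pages.
 */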
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
        int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
        int log_rq_stride = srqc->logstride & 7;
        int page_shift = (srqc->log_page_size & 0x3f) + 12;

        if (log_srq_size + log_rq_stride + 4 < page_shift)
                return 1;

        return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}

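/* The SRQ context embeds its own SRQ number in the low 24 bits of
 * state_logsize_srqn; requiring it to match the in_modifier stops a slave
 * from registering a context under one SRQ number while pointing it at
 * another.
 */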
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int srqn = vhcr->in_modifier;
        struct res_mtt *mtt;
        struct res_srq *srq = NULL;
        struct mlx4_srq_context *srqc = inbox->buf;
        int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

        if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
                return -EINVAL;

        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
        if (err)
                return err;
        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
        if (err)
                goto ex_abort;
        err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
                              mtt);
        if (err)
                goto ex_put_mtt;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put_mtt;

        atomic_inc(&mtt->ref_count);
        srq->mtt = mtt;
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_SRQ, srqn);
        return 0;

ex_put_mtt:
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
        res_abort_move(dev, slave, RES_SRQ, srqn);

        return err;
}

int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int srqn = vhcr->in_modifier;
        struct res_srq *srq = NULL;

        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
        if (err)
                return err;
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_abort;
        atomic_dec(&srq->mtt->ref_count);
        if (srq->cq)
                atomic_dec(&srq->cq->ref_count);
        res_end_move(dev, slave, RES_SRQ, srqn);

        return 0;

ex_abort:
        res_abort_move(dev, slave, RES_SRQ, srqn);

        return err;
}