linux/drivers/infiniband/core/verbs.c
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>
#include <rdma/lag.h>

#include "core_priv.h"
#include <trace/events/rdma_core.h>

static int ib_resolve_eth_dmac(struct ib_device *device,
                               struct rdma_ah_attr *ah_attr);

static const char * const ib_events[] = {
        [IB_EVENT_CQ_ERR]               = "CQ error",
        [IB_EVENT_QP_FATAL]             = "QP fatal error",
        [IB_EVENT_QP_REQ_ERR]           = "QP request error",
        [IB_EVENT_QP_ACCESS_ERR]        = "QP access error",
        [IB_EVENT_COMM_EST]             = "communication established",
        [IB_EVENT_SQ_DRAINED]           = "send queue drained",
        [IB_EVENT_PATH_MIG]             = "path migration successful",
        [IB_EVENT_PATH_MIG_ERR]         = "path migration error",
        [IB_EVENT_DEVICE_FATAL]         = "device fatal error",
        [IB_EVENT_PORT_ACTIVE]          = "port active",
        [IB_EVENT_PORT_ERR]             = "port error",
        [IB_EVENT_LID_CHANGE]           = "LID change",
        [IB_EVENT_PKEY_CHANGE]          = "P_key change",
        [IB_EVENT_SM_CHANGE]            = "SM change",
        [IB_EVENT_SRQ_ERR]              = "SRQ error",
        [IB_EVENT_SRQ_LIMIT_REACHED]    = "SRQ limit reached",
        [IB_EVENT_QP_LAST_WQE_REACHED]  = "last WQE reached",
        [IB_EVENT_CLIENT_REREGISTER]    = "client reregister",
        [IB_EVENT_GID_CHANGE]           = "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
        size_t index = event;

        return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
                        ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
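
/*
 * Example (illustrative sketch, not part of this file): using
 * ib_event_msg() from an asynchronous event handler. "my_event_handler"
 * is a hypothetical callback registered via ib_register_event_handler().
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              pr_info("async event on %s: %s\n",
 *                      dev_name(&event->device->dev),
 *                      ib_event_msg(event->event));
 *      }
 */
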
  91
  92static const char * const wc_statuses[] = {
  93        [IB_WC_SUCCESS]                 = "success",
  94        [IB_WC_LOC_LEN_ERR]             = "local length error",
  95        [IB_WC_LOC_QP_OP_ERR]           = "local QP operation error",
  96        [IB_WC_LOC_EEC_OP_ERR]          = "local EE context operation error",
  97        [IB_WC_LOC_PROT_ERR]            = "local protection error",
  98        [IB_WC_WR_FLUSH_ERR]            = "WR flushed",
  99        [IB_WC_MW_BIND_ERR]             = "memory bind operation error",
 100        [IB_WC_BAD_RESP_ERR]            = "bad response error",
 101        [IB_WC_LOC_ACCESS_ERR]          = "local access error",
 102        [IB_WC_REM_INV_REQ_ERR]         = "remote invalid request error",
 103        [IB_WC_REM_ACCESS_ERR]          = "remote access error",
 104        [IB_WC_REM_OP_ERR]              = "remote operation error",
 105        [IB_WC_RETRY_EXC_ERR]           = "transport retry counter exceeded",
 106        [IB_WC_RNR_RETRY_EXC_ERR]       = "RNR retry counter exceeded",
 107        [IB_WC_LOC_RDD_VIOL_ERR]        = "local RDD violation error",
 108        [IB_WC_REM_INV_RD_REQ_ERR]      = "remote invalid RD request",
 109        [IB_WC_REM_ABORT_ERR]           = "operation aborted",
 110        [IB_WC_INV_EECN_ERR]            = "invalid EE context number",
 111        [IB_WC_INV_EEC_STATE_ERR]       = "invalid EE context state",
 112        [IB_WC_FATAL_ERR]               = "fatal error",
 113        [IB_WC_RESP_TIMEOUT_ERR]        = "response timeout error",
 114        [IB_WC_GENERAL_ERR]             = "general error",
 115};
 116
 117const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
 118{
 119        size_t index = status;
 120
 121        return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
 122                        wc_statuses[index] : "unrecognized status";
 123}
 124EXPORT_SYMBOL(ib_wc_status_msg);
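
/*
 * Example (illustrative sketch, not part of this file): reporting a
 * failed completion with ib_wc_status_msg() from a hypothetical CQ
 * polling loop.
 *
 *      struct ib_wc wc;
 *
 *      while (ib_poll_cq(cq, 1, &wc) > 0) {
 *              if (wc.status != IB_WC_SUCCESS)
 *                      pr_err("WR %llu failed: %s\n",
 *                             (unsigned long long)wc.wr_id,
 *                             ib_wc_status_msg(wc.status));
 *      }
 */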

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return   1;
        case IB_RATE_5_GBPS:   return   2;
        case IB_RATE_10_GBPS:  return   4;
        case IB_RATE_20_GBPS:  return   8;
        case IB_RATE_30_GBPS:  return  12;
        case IB_RATE_40_GBPS:  return  16;
        case IB_RATE_60_GBPS:  return  24;
        case IB_RATE_80_GBPS:  return  32;
        case IB_RATE_120_GBPS: return  48;
        case IB_RATE_14_GBPS:  return   6;
        case IB_RATE_56_GBPS:  return  22;
        case IB_RATE_112_GBPS: return  45;
        case IB_RATE_168_GBPS: return  67;
        case IB_RATE_25_GBPS:  return  10;
        case IB_RATE_100_GBPS: return  40;
        case IB_RATE_200_GBPS: return  80;
        case IB_RATE_300_GBPS: return 120;
        case IB_RATE_28_GBPS:  return  11;
        case IB_RATE_50_GBPS:  return  20;
        case IB_RATE_400_GBPS: return 160;
        case IB_RATE_600_GBPS: return 240;
        default:               return  -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
        switch (mult) {
        case 1:   return IB_RATE_2_5_GBPS;
        case 2:   return IB_RATE_5_GBPS;
        case 4:   return IB_RATE_10_GBPS;
        case 8:   return IB_RATE_20_GBPS;
        case 12:  return IB_RATE_30_GBPS;
        case 16:  return IB_RATE_40_GBPS;
        case 24:  return IB_RATE_60_GBPS;
        case 32:  return IB_RATE_80_GBPS;
        case 48:  return IB_RATE_120_GBPS;
        case 6:   return IB_RATE_14_GBPS;
        case 22:  return IB_RATE_56_GBPS;
        case 45:  return IB_RATE_112_GBPS;
        case 67:  return IB_RATE_168_GBPS;
        case 10:  return IB_RATE_25_GBPS;
        case 40:  return IB_RATE_100_GBPS;
        case 80:  return IB_RATE_200_GBPS;
        case 120: return IB_RATE_300_GBPS;
        case 11:  return IB_RATE_28_GBPS;
        case 20:  return IB_RATE_50_GBPS;
        case 160: return IB_RATE_400_GBPS;
        case 240: return IB_RATE_600_GBPS;
        default:  return IB_RATE_PORT_CURRENT;
        }
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return 2500;
        case IB_RATE_5_GBPS:   return 5000;
        case IB_RATE_10_GBPS:  return 10000;
        case IB_RATE_20_GBPS:  return 20000;
        case IB_RATE_30_GBPS:  return 30000;
        case IB_RATE_40_GBPS:  return 40000;
        case IB_RATE_60_GBPS:  return 60000;
        case IB_RATE_80_GBPS:  return 80000;
        case IB_RATE_120_GBPS: return 120000;
        case IB_RATE_14_GBPS:  return 14062;
        case IB_RATE_56_GBPS:  return 56250;
        case IB_RATE_112_GBPS: return 112500;
        case IB_RATE_168_GBPS: return 168750;
        case IB_RATE_25_GBPS:  return 25781;
        case IB_RATE_100_GBPS: return 103125;
        case IB_RATE_200_GBPS: return 206250;
        case IB_RATE_300_GBPS: return 309375;
        case IB_RATE_28_GBPS:  return 28125;
        case IB_RATE_50_GBPS:  return 53125;
        case IB_RATE_400_GBPS: return 425000;
        case IB_RATE_600_GBPS: return 637500;
        default:               return -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mbps);
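
/*
 * Example (illustrative sketch, not part of this file): the rate helpers
 * round-trip between the IB rate encoding and a multiple of the 2.5 Gb/s
 * base rate, while ib_rate_to_mbps() returns the actual signalling rate:
 *
 *      int mult = ib_rate_to_mult(IB_RATE_100_GBPS);   // 40
 *      enum ib_rate rate = mult_to_ib_rate(40);        // IB_RATE_100_GBPS
 *      int mbps = ib_rate_to_mbps(rate);               // 103125
 */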

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type)
{
        if (node_type == RDMA_NODE_USNIC)
                return RDMA_TRANSPORT_USNIC;
        if (node_type == RDMA_NODE_USNIC_UDP)
                return RDMA_TRANSPORT_USNIC_UDP;
        if (node_type == RDMA_NODE_RNIC)
                return RDMA_TRANSPORT_IWARP;
        if (node_type == RDMA_NODE_UNSPECIFIED)
                return RDMA_TRANSPORT_UNSPECIFIED;

        return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
                                              u32 port_num)
{
        enum rdma_transport_type lt;

        if (device->ops.get_link_layer)
                return device->ops.get_link_layer(device, port_num);

        lt = rdma_node_get_transport(device->node_type);
        if (lt == RDMA_TRANSPORT_IB)
                return IB_LINK_LAYER_INFINIBAND;

        return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
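
/*
 * Example (illustrative sketch, not part of this file): branching on the
 * link layer of port 1 of a hypothetical "my_dev" device.
 *
 *      if (rdma_port_get_link_layer(my_dev, 1) == IB_LINK_LAYER_ETHERNET)
 *              pr_info("port 1 uses an Ethernet link layer (RoCE/iWARP)\n");
 *      else
 *              pr_info("port 1 is native InfiniBand\n");
 */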

/* Protection domains */

/**
 * __ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags
 * @caller: caller's build-time module name
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
                const char *caller)
{
        struct ib_pd *pd;
        int mr_access_flags = 0;
        int ret;

        pd = rdma_zalloc_drv_obj(device, ib_pd);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        pd->device = device;
        pd->uobject = NULL;
        pd->__internal_mr = NULL;
        atomic_set(&pd->usecnt, 0);
        pd->flags = flags;

        rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
        rdma_restrack_set_name(&pd->res, caller);

        ret = device->ops.alloc_pd(pd, NULL);
        if (ret) {
                rdma_restrack_put(&pd->res);
                kfree(pd);
                return ERR_PTR(ret);
        }
        rdma_restrack_add(&pd->res);

        if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
                pd->local_dma_lkey = device->local_dma_lkey;
        else
                mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

        if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
                pr_warn("%s: enabling unsafe global rkey\n", caller);
                mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
        }

        if (mr_access_flags) {
                struct ib_mr *mr;

                mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
                if (IS_ERR(mr)) {
                        ib_dealloc_pd(pd);
                        return ERR_CAST(mr);
                }

                mr->device      = pd->device;
                mr->pd          = pd;
                mr->type        = IB_MR_TYPE_DMA;
                mr->uobject     = NULL;
                mr->need_inval  = false;

                pd->__internal_mr = mr;

                if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
                        pd->local_dma_lkey = pd->__internal_mr->lkey;

                if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
                        pd->unsafe_global_rkey = pd->__internal_mr->rkey;
        }

        return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
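
/*
 * Example (illustrative sketch, not part of this file): a kernel ULP
 * allocating a PD through the ib_alloc_pd() wrapper (which supplies the
 * caller's module name) and releasing it again once every resource
 * created under it has been destroyed.
 *
 *      struct ib_pd *pd;
 *
 *      pd = ib_alloc_pd(device, 0);
 *      if (IS_ERR(pd))
 *              return PTR_ERR(pd);
 *
 *      ... create QPs/CQs/MRs under this PD ...
 *
 *      ib_dealloc_pd(pd);
 */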

/**
 * ib_dealloc_pd_user - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 * @udata: Valid user data or NULL for kernel object
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible for synchronously destroying them and
 * for guaranteeing that no new allocations will happen.
 */
int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
{
        int ret;

        if (pd->__internal_mr) {
                ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
                WARN_ON(ret);
                pd->__internal_mr = NULL;
        }

        /* uverbs manipulates usecnt with proper locking, while the kabi
         * requires the caller to guarantee we can't race here.
         */
        WARN_ON(atomic_read(&pd->usecnt));

        ret = pd->device->ops.dealloc_pd(pd, udata);
        if (ret)
                return ret;

        rdma_restrack_del(&pd->res);
        kfree(pd);
        return ret;
}
EXPORT_SYMBOL(ib_dealloc_pd_user);

/* Address handles */

/**
 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
 * @dest:       Pointer to destination ah_attr. Contents of the destination
 *              pointer are assumed to be invalid and are overwritten.
 * @src:        Pointer to source ah_attr.
 */
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
                       const struct rdma_ah_attr *src)
{
        *dest = *src;
        if (dest->grh.sgid_attr)
                rdma_hold_gid_attr(dest->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_copy_ah_attr);

/**
 * rdma_replace_ah_attr - Replace valid ah_attr with a new one.
 * @old:        Pointer to existing ah_attr which needs to be replaced.
 *              old is assumed to be valid or zero'd
 * @new:        Pointer to the new ah_attr.
 *
 * rdma_replace_ah_attr() first releases any reference held by the old
 * ah_attr if the old ah_attr is valid; after that it copies the new
 * attribute and takes a reference on its SGID attribute.
 */
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
                          const struct rdma_ah_attr *new)
{
        rdma_destroy_ah_attr(old);
        *old = *new;
        if (old->grh.sgid_attr)
                rdma_hold_gid_attr(old->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_replace_ah_attr);

/**
 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
 * @dest:       Pointer to destination ah_attr to copy to.
 *              dest is assumed to be valid or zero'd
 * @src:        Pointer to the source ah_attr.
 *
 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
 * if it is valid. It then transfers ownership of the internal references from
 * src to dest, making src invalid in the process. No new reference to the src
 * ah_attr is taken.
 */
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
{
        rdma_destroy_ah_attr(dest);
        *dest = *src;
        src->grh.sgid_attr = NULL;
}
EXPORT_SYMBOL(rdma_move_ah_attr);
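
/*
 * Example (illustrative sketch, not part of this file): the difference
 * between copying and moving an ah_attr. "src" stands for a previously
 * initialized attribute. After a copy both attrs hold a reference to the
 * SGID entry and both must be destroyed; after a move only the
 * destination does.
 *
 *      struct rdma_ah_attr a = {}, b = {};
 *
 *      rdma_copy_ah_attr(&a, &src);    // a and src each hold a reference
 *      rdma_move_ah_attr(&b, &a);      // b owns a's reference, a is invalid
 *      rdma_destroy_ah_attr(&b);
 *      rdma_destroy_ah_attr(&src);
 */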

/*
 * Validate that the rdma_ah_attr is valid for the device before passing it
 * off to the driver.
 */
static int rdma_check_ah_attr(struct ib_device *device,
                              struct rdma_ah_attr *ah_attr)
{
        if (!rdma_is_port_valid(device, ah_attr->port_num))
                return -EINVAL;

        if ((rdma_is_grh_required(device, ah_attr->port_num) ||
             ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
            !(ah_attr->ah_flags & IB_AH_GRH))
                return -EINVAL;

        if (ah_attr->grh.sgid_attr) {
                /*
                 * Make sure the passed sgid_attr is consistent with the
                 * parameters
                 */
                if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
                    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
                        return -EINVAL;
        }
        return 0;
}

/*
 * If the ah requires a GRH then ensure that the sgid_attr pointer is filled
 * in. On success the caller must call rdma_unfill_sgid_attr().
 */
static int rdma_fill_sgid_attr(struct ib_device *device,
                               struct rdma_ah_attr *ah_attr,
                               const struct ib_gid_attr **old_sgid_attr)
{
        const struct ib_gid_attr *sgid_attr;
        struct ib_global_route *grh;
        int ret;

        *old_sgid_attr = ah_attr->grh.sgid_attr;

        ret = rdma_check_ah_attr(device, ah_attr);
        if (ret)
                return ret;

        if (!(ah_attr->ah_flags & IB_AH_GRH))
                return 0;

        grh = rdma_ah_retrieve_grh(ah_attr);
        if (grh->sgid_attr)
                return 0;

        sgid_attr =
                rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
        if (IS_ERR(sgid_attr))
                return PTR_ERR(sgid_attr);

        /* Move ownership of the kref into the ah_attr */
        grh->sgid_attr = sgid_attr;
        return 0;
}

static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
                                  const struct ib_gid_attr *old_sgid_attr)
{
        /*
         * Fill didn't change anything, the caller retains ownership of
         * whatever it passed
         */
        if (ah_attr->grh.sgid_attr == old_sgid_attr)
                return;

        /*
         * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the
         * caller doesn't see any change in the rdma_ah_attr. If we get here
         * old_sgid_attr is NULL.
         */
        rdma_destroy_ah_attr(ah_attr);
}

static const struct ib_gid_attr *
rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
                      const struct ib_gid_attr *old_attr)
{
        if (old_attr)
                rdma_put_gid_attr(old_attr);
        if (ah_attr->ah_flags & IB_AH_GRH) {
                rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
                return ah_attr->grh.sgid_attr;
        }
        return NULL;
}

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
                                     struct rdma_ah_attr *ah_attr,
                                     u32 flags,
                                     struct ib_udata *udata,
                                     struct net_device *xmit_slave)
{
        struct rdma_ah_init_attr init_attr = {};
        struct ib_device *device = pd->device;
        struct ib_ah *ah;
        int ret;

        might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);

        if (!udata && !device->ops.create_ah)
                return ERR_PTR(-EOPNOTSUPP);

        ah = rdma_zalloc_drv_obj_gfp(
                device, ib_ah,
                (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        ah->device = device;
        ah->pd = pd;
        ah->type = ah_attr->type;
        ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
        init_attr.ah_attr = ah_attr;
        init_attr.flags = flags;
        init_attr.xmit_slave = xmit_slave;

        if (udata)
                ret = device->ops.create_user_ah(ah, &init_attr, udata);
        else
                ret = device->ops.create_ah(ah, &init_attr, NULL);
        if (ret) {
                kfree(ah);
                return ERR_PTR(ret);
        }

        atomic_inc(&pd->usecnt);
        return ah;
}

/**
 * rdma_create_ah - Creates an address handle for the
 * given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * It returns a pointer to the newly created address handle on success,
 * or an ERR_PTR-encoded error code on failure. The address handle is
 * used to reference a local or global destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
                             u32 flags)
{
        const struct ib_gid_attr *old_sgid_attr;
        struct net_device *slave;
        struct ib_ah *ah;
        int ret;

        ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
        if (ret)
                return ERR_PTR(ret);
        slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
                                           (flags & RDMA_CREATE_AH_SLEEPABLE) ?
                                           GFP_KERNEL : GFP_ATOMIC);
        if (IS_ERR(slave)) {
                rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
                return (void *)slave;
        }
        ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
        rdma_lag_put_ah_roce_slave(slave);
        rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
        return ah;
}
EXPORT_SYMBOL(rdma_create_ah);
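
/*
 * Example (illustrative sketch, not part of this file): building a
 * minimal IB ah_attr by hand and creating an AH from it. "remote_lid"
 * and the service level are placeholders.
 *
 *      struct rdma_ah_attr attr = {};
 *      struct ib_ah *ah;
 *
 *      attr.type = rdma_ah_find_type(pd->device, 1);
 *      rdma_ah_set_port_num(&attr, 1);
 *      rdma_ah_set_dlid(&attr, remote_lid);
 *      rdma_ah_set_sl(&attr, 0);
 *
 *      ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
 *      if (IS_ERR(ah))
 *              return PTR_ERR(ah);
 */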

/**
 * rdma_create_user_ah - Creates an address handle for the
 * given address vector.
 * It resolves the destination mac address for an ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input output buffer information needed by
 *         provider driver.
 *
 * It returns a pointer to the newly created address handle on success,
 * or an ERR_PTR-encoded error code on failure. The address handle is
 * used to reference a local or global destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
                                  struct rdma_ah_attr *ah_attr,
                                  struct ib_udata *udata)
{
        const struct ib_gid_attr *old_sgid_attr;
        struct ib_ah *ah;
        int err;

        err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
        if (err)
                return ERR_PTR(err);

        if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
                err = ib_resolve_eth_dmac(pd->device, ah_attr);
                if (err) {
                        ah = ERR_PTR(err);
                        goto out;
                }
        }

        ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
                             udata, NULL);

out:
        rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
        return ah;
}
EXPORT_SYMBOL(rdma_create_user_ah);

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
        const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
        struct iphdr ip4h_checked;
        const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

        /* If it's IPv6, the version must be 6, otherwise, the first
         * 20 bytes (before the IPv4 header) are garbled.
         */
        if (ip6h->version != 6)
                return (ip4h->version == 4) ? 4 : 0;
        /* version may be 6 or 4 because the first 20 bytes could be garbled */

        /* RoCE v2 requires no options, thus header length
         * must be 5 words
         */
        if (ip4h->ihl != 5)
                return 6;

        /* Verify checksum.
         * We can't write on scattered buffers so we need to copy to
         * temp buffer.
         */
        memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
        ip4h_checked.check = 0;
        ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
        /* if IPv4 header checksum is OK, believe it */
        if (ip4h->check == ip4h_checked.check)
                return 4;
        return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
                                                     u32 port_num,
                                                     const struct ib_grh *grh)
{
        int grh_version;

        if (rdma_protocol_ib(device, port_num))
                return RDMA_NETWORK_IB;

        grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

        if (grh_version == 4)
                return RDMA_NETWORK_IPV4;

        if (grh->next_hdr == IPPROTO_UDP)
                return RDMA_NETWORK_IPV6;

        return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
        u16 vlan_id;
        enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
                           const struct ib_gid_attr *gid_attr,
                           void *context)
{
        struct find_gid_index_context *ctx = context;
        u16 vlan_id = 0xffff;
        int ret;

        if (ctx->gid_type != gid_attr->gid_type)
                return false;

        ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
        if (ret)
                return false;

        return ctx->vlan_id == vlan_id;
}

static const struct ib_gid_attr *
get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
                       u16 vlan_id, const union ib_gid *sgid,
                       enum ib_gid_type gid_type)
{
        struct find_gid_index_context context = {.vlan_id = vlan_id,
                                                 .gid_type = gid_type};

        return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
                                       &context);
}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
                              enum rdma_network_type net_type,
                              union ib_gid *sgid, union ib_gid *dgid)
{
        struct sockaddr_in  src_in;
        struct sockaddr_in  dst_in;
        __be32 src_saddr, dst_saddr;

        if (!sgid || !dgid)
                return -EINVAL;

        if (net_type == RDMA_NETWORK_IPV4) {
                memcpy(&src_in.sin_addr.s_addr,
                       &hdr->roce4grh.saddr, 4);
                memcpy(&dst_in.sin_addr.s_addr,
                       &hdr->roce4grh.daddr, 4);
                src_saddr = src_in.sin_addr.s_addr;
                dst_saddr = dst_in.sin_addr.s_addr;
                ipv6_addr_set_v4mapped(src_saddr,
                                       (struct in6_addr *)sgid);
                ipv6_addr_set_v4mapped(dst_saddr,
                                       (struct in6_addr *)dgid);
                return 0;
        } else if (net_type == RDMA_NETWORK_IPV6 ||
                   net_type == RDMA_NETWORK_IB ||
                   net_type == RDMA_NETWORK_ROCE_V1) {
                *dgid = hdr->ibgrh.dgid;
                *sgid = hdr->ibgrh.sgid;
                return 0;
        } else {
                return -EINVAL;
        }
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

/* Resolve destination mac address and hop limit for unicast destination
 * GID entry, considering the source GID entry as well.
 * ah_attribute must have valid port_num, sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
                                       struct rdma_ah_attr *ah_attr)
{
        struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
        const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
        int hop_limit = 0xff;
        int ret = 0;

        /* If destination is link local and source GID is RoCEv1,
         * IP stack is not used.
         */
        if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
            sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
                rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
                                ah_attr->roce.dmac);
                return ret;
        }

        ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
                                           ah_attr->roce.dmac,
                                           sgid_attr, &hop_limit);

        grh->hop_limit = hop_limit;
        return ret;
}

/*
 * This function initializes address handle attributes from the incoming packet.
 * The incoming packet's dgid holds the GID of the receiving node, on which
 * this code is executing, and its sgid holds the GID of the sender.
 *
 * When resolving the mac address of the destination, the arrived dgid is
 * used as the sgid and the sgid is used as the dgid, because the sgid
 * contains the destination GID to respond to.
 *
 * On success the caller is responsible to call rdma_destroy_ah_attr on the
 * attr.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
                            const struct ib_wc *wc, const struct ib_grh *grh,
                            struct rdma_ah_attr *ah_attr)
{
        u32 flow_class;
        int ret;
        enum rdma_network_type net_type = RDMA_NETWORK_IB;
        enum ib_gid_type gid_type = IB_GID_TYPE_IB;
        const struct ib_gid_attr *sgid_attr;
        int hoplimit = 0xff;
        union ib_gid dgid;
        union ib_gid sgid;

        might_sleep();

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->type = rdma_ah_find_type(device, port_num);
        if (rdma_cap_eth_ah(device, port_num)) {
                if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
                        net_type = wc->network_hdr_type;
                else
                        net_type = ib_get_net_type_by_grh(device, port_num, grh);
                gid_type = ib_network_to_gid_type(net_type);
        }
        ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
                                        &sgid, &dgid);
        if (ret)
                return ret;

        rdma_ah_set_sl(ah_attr, wc->sl);
        rdma_ah_set_port_num(ah_attr, port_num);

        if (rdma_protocol_roce(device, port_num)) {
                u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
                                wc->vlan_id : 0xffff;

                if (!(wc->wc_flags & IB_WC_GRH))
                        return -EPROTOTYPE;

                sgid_attr = get_sgid_attr_from_eth(device, port_num,
                                                   vlan_id, &dgid,
                                                   gid_type);
                if (IS_ERR(sgid_attr))
                        return PTR_ERR(sgid_attr);

                flow_class = be32_to_cpu(grh->version_tclass_flow);
                rdma_move_grh_sgid_attr(ah_attr,
                                        &sgid,
                                        flow_class & 0xFFFFF,
                                        hoplimit,
                                        (flow_class >> 20) & 0xFF,
                                        sgid_attr);

                ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
                if (ret)
                        rdma_destroy_ah_attr(ah_attr);

                return ret;
        } else {
                rdma_ah_set_dlid(ah_attr, wc->slid);
                rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);

                if ((wc->wc_flags & IB_WC_GRH) == 0)
                        return 0;

                if (dgid.global.interface_id !=
                                        cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
                        sgid_attr = rdma_find_gid_by_port(
                                device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
                } else {
                        sgid_attr = rdma_get_gid_attr(device, port_num, 0);
                }

                if (IS_ERR(sgid_attr))
                        return PTR_ERR(sgid_attr);
                flow_class = be32_to_cpu(grh->version_tclass_flow);
                rdma_move_grh_sgid_attr(ah_attr,
                                        &sgid,
                                        flow_class & 0xFFFFF,
                                        hoplimit,
                                        (flow_class >> 20) & 0xFF,
                                        sgid_attr);

                return 0;
        }
}
EXPORT_SYMBOL(ib_init_ah_attr_from_wc);

/**
 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
 * of the reference
 *
 * @attr:       Pointer to AH attribute structure
 * @dgid:       Destination GID
 * @flow_label: Flow label
 * @hop_limit:  Hop limit
 * @traffic_class: traffic class
 * @sgid_attr:  Pointer to SGID attribute
 *
 * This takes ownership of the sgid_attr reference. The caller must ensure
 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
 * calling this function.
 */
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
                             u32 flow_label, u8 hop_limit, u8 traffic_class,
                             const struct ib_gid_attr *sgid_attr)
{
        rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
                        traffic_class);
        attr->grh.sgid_attr = sgid_attr;
}
EXPORT_SYMBOL(rdma_move_grh_sgid_attr);

/**
 * rdma_destroy_ah_attr - Release reference to SGID attribute of
 * ah attribute.
 * @ah_attr: Pointer to ah attribute
 *
 * Release reference to the SGID attribute of the ah attribute if it is
 * non-NULL. It is safe to call this multiple times, and safe to call it on
 * a zero initialized ah_attr.
 */
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
{
        if (ah_attr->grh.sgid_attr) {
                rdma_put_gid_attr(ah_attr->grh.sgid_attr);
                ah_attr->grh.sgid_attr = NULL;
        }
}
EXPORT_SYMBOL(rdma_destroy_ah_attr);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
                                   const struct ib_grh *grh, u32 port_num)
{
        struct rdma_ah_attr ah_attr;
        struct ib_ah *ah;
        int ret;

        ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
        if (ret)
                return ERR_PTR(ret);

        ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);

        rdma_destroy_ah_attr(&ah_attr);
        return ah;
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
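
/*
 * Example (illustrative sketch, not part of this file): a UD service
 * replying to a received datagram by building an AH straight from the
 * work completion. "recv_grh" stands for the GRH buffer posted with the
 * receive.
 *
 *      struct ib_ah *ah;
 *
 *      ah = ib_create_ah_from_wc(pd, &wc, recv_grh, port_num);
 *      if (IS_ERR(ah))
 *              return PTR_ERR(ah);
 *
 *      ... post a UD send using ah, wc.src_qp and wc.pkey_index ...
 *
 *      rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */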

int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
        const struct ib_gid_attr *old_sgid_attr;
        int ret;

        if (ah->type != ah_attr->type)
                return -EINVAL;

        ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
        if (ret)
                return ret;

        ret = ah->device->ops.modify_ah ?
                ah->device->ops.modify_ah(ah, ah_attr) :
                -EOPNOTSUPP;

        ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
        rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
        return ret;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
        ah_attr->grh.sgid_attr = NULL;

        return ah->device->ops.query_ah ?
                ah->device->ops.query_ah(ah, ah_attr) :
                -EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
{
        const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
        struct ib_pd *pd;
        int ret;

        might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);

        pd = ah->pd;

        ret = ah->device->ops.destroy_ah(ah, flags);
        if (ret)
                return ret;

        atomic_dec(&pd->usecnt);
        if (sgid_attr)
                rdma_put_gid_attr(sgid_attr);

        kfree(ah);
        return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah_user);

/* Shared receive queues */

/**
 * ib_create_srq_user - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 * @uobject: uobject pointer if this is not a kernel SRQ
 * @udata: udata pointer if this is not a kernel SRQ
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
                                  struct ib_srq_init_attr *srq_init_attr,
                                  struct ib_usrq_object *uobject,
                                  struct ib_udata *udata)
{
        struct ib_srq *srq;
        int ret;

        srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        srq->device = pd->device;
        srq->pd = pd;
        srq->event_handler = srq_init_attr->event_handler;
        srq->srq_context = srq_init_attr->srq_context;
        srq->srq_type = srq_init_attr->srq_type;
        srq->uobject = uobject;

        if (ib_srq_has_cq(srq->srq_type)) {
                srq->ext.cq = srq_init_attr->ext.cq;
                atomic_inc(&srq->ext.cq->usecnt);
        }
        if (srq->srq_type == IB_SRQT_XRC) {
                srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
                atomic_inc(&srq->ext.xrc.xrcd->usecnt);
        }
        atomic_inc(&pd->usecnt);

        rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
        rdma_restrack_parent_name(&srq->res, &pd->res);

        ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
        if (ret) {
                rdma_restrack_put(&srq->res);
                atomic_dec(&srq->pd->usecnt);
                if (srq->srq_type == IB_SRQT_XRC)
                        atomic_dec(&srq->ext.xrc.xrcd->usecnt);
                if (ib_srq_has_cq(srq->srq_type))
                        atomic_dec(&srq->ext.cq->usecnt);
                kfree(srq);
                return ERR_PTR(ret);
        }

        rdma_restrack_add(&srq->res);

        return srq;
}
EXPORT_SYMBOL(ib_create_srq_user);
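
/*
 * Example (illustrative sketch, not part of this file): a kernel ULP
 * creating a basic SRQ via the ib_create_srq() wrapper. The queue sizes
 * are placeholders; on success the attrs reflect the actual capacities.
 *
 *      struct ib_srq_init_attr srq_attr = {
 *              .attr = {
 *                      .max_wr  = 256,
 *                      .max_sge = 1,
 *              },
 *              .srq_type = IB_SRQT_BASIC,
 *      };
 *      struct ib_srq *srq;
 *
 *      srq = ib_create_srq(pd, &srq_attr);
 *      if (IS_ERR(srq))
 *              return PTR_ERR(srq);
 */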

int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->ops.modify_srq ?
                srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
                                            NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->ops.query_srq ?
                srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
{
        int ret;

        if (atomic_read(&srq->usecnt))
                return -EBUSY;

        ret = srq->device->ops.destroy_srq(srq, udata);
        if (ret)
                return ret;

        atomic_dec(&srq->pd->usecnt);
        if (srq->srq_type == IB_SRQT_XRC)
                atomic_dec(&srq->ext.xrc.xrcd->usecnt);
        if (ib_srq_has_cq(srq->srq_type))
                atomic_dec(&srq->ext.cq->usecnt);
        rdma_restrack_del(&srq->res);
        kfree(srq);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_srq_user);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
        struct ib_qp *qp = context;
        unsigned long flags;

        spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
        list_for_each_entry(event->element.qp, &qp->open_list, open_list)
                if (event->element.qp->event_handler)
                        event->element.qp->event_handler(event, event->element.qp->qp_context);
        spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
                                  void (*event_handler)(struct ib_event *, void *),
                                  void *qp_context)
{
        struct ib_qp *qp;
        unsigned long flags;
        int err;

        qp = kzalloc(sizeof *qp, GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        qp->real_qp = real_qp;
        err = ib_open_shared_qp_security(qp, real_qp->device);
        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        atomic_inc(&real_qp->usecnt);
        qp->device = real_qp->device;
        qp->event_handler = event_handler;
        qp->qp_context = qp_context;
        qp->qp_num = real_qp->qp_num;
        qp->qp_type = real_qp->qp_type;

        spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
        list_add(&qp->open_list, &real_qp->open_list);
        spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);

        return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
                         struct ib_qp_open_attr *qp_open_attr)
{
        struct ib_qp *qp, *real_qp;

        if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
                return ERR_PTR(-EINVAL);

        down_read(&xrcd->tgt_qps_rwsem);
        real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
        if (!real_qp) {
                up_read(&xrcd->tgt_qps_rwsem);
                return ERR_PTR(-EINVAL);
        }
        qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
                          qp_open_attr->qp_context);
        up_read(&xrcd->tgt_qps_rwsem);
        return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
                                        struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *real_qp = qp;
        int err;

        qp->event_handler = __ib_shared_qp_event_handler;
        qp->qp_context = qp;
        qp->pd = NULL;
        qp->send_cq = qp->recv_cq = NULL;
        qp->srq = NULL;
        qp->xrcd = qp_init_attr->xrcd;
        atomic_inc(&qp_init_attr->xrcd->usecnt);
        INIT_LIST_HEAD(&qp->open_list);

        qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
                          qp_init_attr->qp_context);
        if (IS_ERR(qp))
                return qp;

        err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
                              real_qp, GFP_KERNEL));
        if (err) {
                ib_close_qp(qp);
                return ERR_PTR(err);
        }
        return qp;
}

/**
 * ib_create_named_qp - Creates a kernel QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 * @caller: caller's build-time module name
 *
 * NOTE: for user qp use ib_create_qp_user with valid udata!
 */
struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
                                 struct ib_qp_init_attr *qp_init_attr,
                                 const char *caller)
{
        struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
        struct ib_qp *qp;
        int ret;

        if (qp_init_attr->rwq_ind_tbl &&
            (qp_init_attr->recv_cq ||
            qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
            qp_init_attr->cap.max_recv_sge))
                return ERR_PTR(-EINVAL);

        if ((qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) &&
            !(device->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER))
                return ERR_PTR(-EINVAL);

        /*
         * If the caller is using the RDMA R/W API, calculate the resources
         * needed for the RDMA READ/WRITE operations.
         *
         * Note that these callers need to pass in a port number.
         */
        if (qp_init_attr->cap.max_rdma_ctxs)
                rdma_rw_init_qp(device, qp_init_attr);

        qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
        if (IS_ERR(qp))
                return qp;

        ret = ib_create_qp_security(qp, device);
        if (ret)
                goto err;

        if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
                struct ib_qp *xrc_qp =
                        create_xrc_qp_user(qp, qp_init_attr);

                if (IS_ERR(xrc_qp)) {
                        ret = PTR_ERR(xrc_qp);
                        goto err;
                }
                return xrc_qp;
        }

        qp->event_handler = qp_init_attr->event_handler;
        qp->qp_context = qp_init_attr->qp_context;
        if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
                qp->recv_cq = NULL;
                qp->srq = NULL;
        } else {
                qp->recv_cq = qp_init_attr->recv_cq;
                if (qp_init_attr->recv_cq)
                        atomic_inc(&qp_init_attr->recv_cq->usecnt);
                qp->srq = qp_init_attr->srq;
                if (qp->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        qp->send_cq = qp_init_attr->send_cq;
        qp->xrcd    = NULL;

        atomic_inc(&pd->usecnt);
        if (qp_init_attr->send_cq)
                atomic_inc(&qp_init_attr->send_cq->usecnt);
        if (qp_init_attr->rwq_ind_tbl)
                atomic_inc(&qp->rwq_ind_tbl->usecnt);

        if (qp_init_attr->cap.max_rdma_ctxs) {
                ret = rdma_rw_init_mrs(qp, qp_init_attr);
                if (ret)
                        goto err;
        }

        /*
         * Note: all hw drivers guarantee that max_send_sge is lower than
         * the device RDMA WRITE SGE limit but not all hw drivers ensure that
         * max_send_sge <= max_sge_rd.
         */
        qp->max_write_sge = qp_init_attr->cap.max_send_sge;
        qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
                                 device->attrs.max_sge_rd);
        if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
                qp->integrity_en = true;

        return qp;

err:
        ib_destroy_qp(qp);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_named_qp);
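
/*
 * Example (illustrative sketch, not part of this file): a kernel ULP
 * creating an RC QP with the ib_create_qp() wrapper. The CQ pointers and
 * capacities are placeholders.
 *
 *      struct ib_qp_init_attr qp_attr = {
 *              .send_cq = send_cq,
 *              .recv_cq = recv_cq,
 *              .cap = {
 *                      .max_send_wr  = 64,
 *                      .max_recv_wr  = 64,
 *                      .max_send_sge = 1,
 *                      .max_recv_sge = 1,
 *              },
 *              .sq_sig_type = IB_SIGNAL_REQ_WR,
 *              .qp_type = IB_QPT_RC,
 *      };
 *      struct ib_qp *qp;
 *
 *      qp = ib_create_qp(pd, &qp_attr);
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 */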

static const struct {
        int                     valid;
        enum ib_qp_attr_mask    req_param[IB_QPT_MAX];
        enum ib_qp_attr_mask    opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_PORT,
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX            |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX            |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                        }
                },
        },
        [IB_QPS_INIT]  = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX            |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX            |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_RTR]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UC]  = (IB_QP_AV                        |
                                                IB_QP_PATH_MTU                  |
                                                IB_QP_DEST_QPN                  |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_RC]  = (IB_QP_AV                        |
                                                IB_QP_PATH_MTU                  |
                                                IB_QP_DEST_QPN                  |
                                                IB_QP_RQ_PSN                    |
                                                IB_QP_MAX_DEST_RD_ATOMIC        |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_XRC_INI] = (IB_QP_AV                    |
                                                IB_QP_PATH_MTU                  |
                                                IB_QP_DEST_QPN                  |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_XRC_TGT] = (IB_QP_AV                    |
                                                IB_QP_PATH_MTU                  |
                                                IB_QP_DEST_QPN                  |
                                                IB_QP_RQ_PSN                    |
                                                IB_QP_MAX_DEST_RD_ATOMIC        |
                                                IB_QP_MIN_RNR_TIMER),
                        },
                        .opt_param = {
                                 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX               |
                                                 IB_QP_QKEY),
                                 [IB_QPT_UC]  = (IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PKEY_INDEX),
                                 [IB_QPT_RC]  = (IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PKEY_INDEX),
                                 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH             |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PKEY_INDEX),
                                 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH             |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PKEY_INDEX),
                                 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX               |
                                                 IB_QP_QKEY),
                                 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX               |
                                                 IB_QP_QKEY),
                         },
                },
        },
        [IB_QPS_RTR]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = IB_QP_SQ_PSN,
                                [IB_QPT_UC]  = IB_QP_SQ_PSN,
                                [IB_QPT_RC]  = (IB_QP_TIMEOUT                   |
                                                IB_QP_RETRY_CNT                 |
                                                IB_QP_RNR_RETRY                 |
                                                IB_QP_SQ_PSN                    |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT               |
                                                IB_QP_RETRY_CNT                 |
                                                IB_QP_RNR_RETRY                 |
                                                IB_QP_SQ_PSN                    |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT               |
                                                IB_QP_SQ_PSN),
                                [IB_QPT_SMI] = IB_QP_SQ_PSN,
                                [IB_QPT_GSI] = IB_QP_SQ_PSN,
                        },
                        .opt_param = {
                                 [IB_QPT_UD]  = (IB_QP_CUR_STATE                |
                                                 IB_QP_QKEY),
1440                                 [IB_QPT_UC]  = (IB_QP_CUR_STATE                |
1441                                                 IB_QP_ALT_PATH                 |
1442                                                 IB_QP_ACCESS_FLAGS             |
1443                                                 IB_QP_PATH_MIG_STATE),
1444                                 [IB_QPT_RC]  = (IB_QP_CUR_STATE                |
1445                                                 IB_QP_ALT_PATH                 |
1446                                                 IB_QP_ACCESS_FLAGS             |
1447                                                 IB_QP_MIN_RNR_TIMER            |
1448                                                 IB_QP_PATH_MIG_STATE),
1449                                 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE            |
1450                                                 IB_QP_ALT_PATH                 |
1451                                                 IB_QP_ACCESS_FLAGS             |
1452                                                 IB_QP_PATH_MIG_STATE),
1453                                 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE            |
1454                                                 IB_QP_ALT_PATH                 |
1455                                                 IB_QP_ACCESS_FLAGS             |
1456                                                 IB_QP_MIN_RNR_TIMER            |
1457                                                 IB_QP_PATH_MIG_STATE),
1458                                 [IB_QPT_SMI] = (IB_QP_CUR_STATE                |
1459                                                 IB_QP_QKEY),
1460                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE                |
1461                                                 IB_QP_QKEY),
1462                                 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1463                         }
1464                }
1465        },
1466        [IB_QPS_RTS]   = {
1467                [IB_QPS_RESET] = { .valid = 1 },
1468                [IB_QPS_ERR] =   { .valid = 1 },
1469                [IB_QPS_RTS]   = {
1470                        .valid = 1,
1471                        .opt_param = {
1472                                [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
1473                                                IB_QP_QKEY),
1474                                [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
1475                                                IB_QP_ACCESS_FLAGS              |
1476                                                IB_QP_ALT_PATH                  |
1477                                                IB_QP_PATH_MIG_STATE),
1478                                [IB_QPT_RC]  = (IB_QP_CUR_STATE                 |
1479                                                IB_QP_ACCESS_FLAGS              |
1480                                                IB_QP_ALT_PATH                  |
1481                                                IB_QP_PATH_MIG_STATE            |
1482                                                IB_QP_MIN_RNR_TIMER),
1483                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE             |
1484                                                IB_QP_ACCESS_FLAGS              |
1485                                                IB_QP_ALT_PATH                  |
1486                                                IB_QP_PATH_MIG_STATE),
1487                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE             |
1488                                                IB_QP_ACCESS_FLAGS              |
1489                                                IB_QP_ALT_PATH                  |
1490                                                IB_QP_PATH_MIG_STATE            |
1491                                                IB_QP_MIN_RNR_TIMER),
1492                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
1493                                                IB_QP_QKEY),
1494                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
1495                                                IB_QP_QKEY),
1496                                [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1497                        }
1498                },
1499                [IB_QPS_SQD]   = {
1500                        .valid = 1,
1501                        .opt_param = {
1502                                [IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1503                                [IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1504                                [IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1505                                [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1506                                [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1507                                [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1508                                [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1509                        }
1510                },
1511        },
1512        [IB_QPS_SQD]   = {
1513                [IB_QPS_RESET] = { .valid = 1 },
1514                [IB_QPS_ERR] =   { .valid = 1 },
1515                [IB_QPS_RTS]   = {
1516                        .valid = 1,
1517                        .opt_param = {
1518                                [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
1519                                                IB_QP_QKEY),
1520                                [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
1521                                                IB_QP_ALT_PATH                  |
1522                                                IB_QP_ACCESS_FLAGS              |
1523                                                IB_QP_PATH_MIG_STATE),
1524                                [IB_QPT_RC]  = (IB_QP_CUR_STATE                 |
1525                                                IB_QP_ALT_PATH                  |
1526                                                IB_QP_ACCESS_FLAGS              |
1527                                                IB_QP_MIN_RNR_TIMER             |
1528                                                IB_QP_PATH_MIG_STATE),
1529                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE             |
1530                                                IB_QP_ALT_PATH                  |
1531                                                IB_QP_ACCESS_FLAGS              |
1532                                                IB_QP_PATH_MIG_STATE),
1533                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE             |
1534                                                IB_QP_ALT_PATH                  |
1535                                                IB_QP_ACCESS_FLAGS              |
1536                                                IB_QP_MIN_RNR_TIMER             |
1537                                                IB_QP_PATH_MIG_STATE),
1538                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
1539                                                IB_QP_QKEY),
1540                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
1541                                                IB_QP_QKEY),
1542                        }
1543                },
1544                [IB_QPS_SQD]   = {
1545                        .valid = 1,
1546                        .opt_param = {
1547                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
1548                                                IB_QP_QKEY),
1549                                [IB_QPT_UC]  = (IB_QP_AV                        |
1550                                                IB_QP_ALT_PATH                  |
1551                                                IB_QP_ACCESS_FLAGS              |
1552                                                IB_QP_PKEY_INDEX                |
1553                                                IB_QP_PATH_MIG_STATE),
1554                                [IB_QPT_RC]  = (IB_QP_PORT                      |
1555                                                IB_QP_AV                        |
1556                                                IB_QP_TIMEOUT                   |
1557                                                IB_QP_RETRY_CNT                 |
1558                                                IB_QP_RNR_RETRY                 |
1559                                                IB_QP_MAX_QP_RD_ATOMIC          |
1560                                                IB_QP_MAX_DEST_RD_ATOMIC        |
1561                                                IB_QP_ALT_PATH                  |
1562                                                IB_QP_ACCESS_FLAGS              |
1563                                                IB_QP_PKEY_INDEX                |
1564                                                IB_QP_MIN_RNR_TIMER             |
1565                                                IB_QP_PATH_MIG_STATE),
1566                                [IB_QPT_XRC_INI] = (IB_QP_PORT                  |
1567                                                IB_QP_AV                        |
1568                                                IB_QP_TIMEOUT                   |
1569                                                IB_QP_RETRY_CNT                 |
1570                                                IB_QP_RNR_RETRY                 |
1571                                                IB_QP_MAX_QP_RD_ATOMIC          |
1572                                                IB_QP_ALT_PATH                  |
1573                                                IB_QP_ACCESS_FLAGS              |
1574                                                IB_QP_PKEY_INDEX                |
1575                                                IB_QP_PATH_MIG_STATE),
1576                                [IB_QPT_XRC_TGT] = (IB_QP_PORT                  |
1577                                                IB_QP_AV                        |
1578                                                IB_QP_TIMEOUT                   |
1579                                                IB_QP_MAX_DEST_RD_ATOMIC        |
1580                                                IB_QP_ALT_PATH                  |
1581                                                IB_QP_ACCESS_FLAGS              |
1582                                                IB_QP_PKEY_INDEX                |
1583                                                IB_QP_MIN_RNR_TIMER             |
1584                                                IB_QP_PATH_MIG_STATE),
1585                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
1586                                                IB_QP_QKEY),
1587                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
1588                                                IB_QP_QKEY),
1589                        }
1590                }
1591        },
1592        [IB_QPS_SQE]   = {
1593                [IB_QPS_RESET] = { .valid = 1 },
1594                [IB_QPS_ERR] =   { .valid = 1 },
1595                [IB_QPS_RTS]   = {
1596                        .valid = 1,
1597                        .opt_param = {
1598                                [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
1599                                                IB_QP_QKEY),
1600                                [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
1601                                                IB_QP_ACCESS_FLAGS),
1602                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
1603                                                IB_QP_QKEY),
1604                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
1605                                                IB_QP_QKEY),
1606                        }
1607                }
1608        },
1609        [IB_QPS_ERR] = {
1610                [IB_QPS_RESET] = { .valid = 1 },
1611                [IB_QPS_ERR] =   { .valid = 1 }
1612        }
1613};
1614
1615bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1616                        enum ib_qp_type type, enum ib_qp_attr_mask mask)
1617{
1618        enum ib_qp_attr_mask req_param, opt_param;
1619
1620        if (mask & IB_QP_CUR_STATE  &&
1621            cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1622            cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1623                return false;
1624
1625        if (!qp_state_table[cur_state][next_state].valid)
1626                return false;
1627
1628        req_param = qp_state_table[cur_state][next_state].req_param[type];
1629        opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1630
1631        if ((mask & req_param) != req_param)
1632                return false;
1633
1634        if (mask & ~(req_param | opt_param | IB_QP_STATE))
1635                return false;
1636
1637        return true;
1638}
1639EXPORT_SYMBOL(ib_modify_qp_is_ok);
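
/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * can pre-validate an attribute mask against the state table above before
 * issuing the transition. The mask below is the required set for an RC
 * INIT->RTR transition, plus IB_QP_STATE which is always permitted.
 *
 *	int check_rc_init_to_rtr(void)
 *	{
 *		enum ib_qp_attr_mask mask = IB_QP_STATE | IB_QP_AV |
 *					    IB_QP_PATH_MTU | IB_QP_DEST_QPN |
 *					    IB_QP_RQ_PSN |
 *					    IB_QP_MAX_DEST_RD_ATOMIC |
 *					    IB_QP_MIN_RNR_TIMER;
 *
 *		if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
 *					mask))
 *			return -EINVAL;
 *		return 0;
 *	}
 */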
1640
1641/**
1642 * ib_resolve_eth_dmac - Resolve destination mac address
1643 * @device:             Device to consider
1644 * @ah_attr:            address handle attribute which describes the
1645 *                      source and destination parameters
1646 * ib_resolve_eth_dmac() resolves the destination MAC address and L3 hop limit.
1647 * It returns 0 on success or an appropriate error code, and initializes the
1648 * necessary ah_attr fields when the call is successful.
1649 */
1650static int ib_resolve_eth_dmac(struct ib_device *device,
1651                               struct rdma_ah_attr *ah_attr)
1652{
1653        int ret = 0;
1654
1655        if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1656                if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1657                        __be32 addr = 0;
1658
1659                        memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1660                        ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1661                } else {
1662                        ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1663                                        (char *)ah_attr->roce.dmac);
1664                }
1665        } else {
1666                ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1667        }
1668        return ret;
1669}
1670
1671static bool is_qp_type_connected(const struct ib_qp *qp)
1672{
1673        return (qp->qp_type == IB_QPT_UC ||
1674                qp->qp_type == IB_QPT_RC ||
1675                qp->qp_type == IB_QPT_XRC_INI ||
1676                qp->qp_type == IB_QPT_XRC_TGT);
1677}
1678
1679/*
1680 * IB core internal function to perform QP attributes modification.
1681 */
1682static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1683                         int attr_mask, struct ib_udata *udata)
1684{
1685        u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1686        const struct ib_gid_attr *old_sgid_attr_av;
1687        const struct ib_gid_attr *old_sgid_attr_alt_av;
1688        int ret;
1689
1690        attr->xmit_slave = NULL;
1691        if (attr_mask & IB_QP_AV) {
1692                ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
1693                                          &old_sgid_attr_av);
1694                if (ret)
1695                        return ret;
1696
1697                if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
1698                    is_qp_type_connected(qp)) {
1699                        struct net_device *slave;
1700
1701                        /*
1702                         * If the user provided the qp_attr then we have to
1703                         * resolve it. Kernel users have to provide already
1704                         * resolved rdma_ah_attr's.
1705                         */
1706                        if (udata) {
1707                                ret = ib_resolve_eth_dmac(qp->device,
1708                                                          &attr->ah_attr);
1709                                if (ret)
1710                                        goto out_av;
1711                        }
1712                        slave = rdma_lag_get_ah_roce_slave(qp->device,
1713                                                           &attr->ah_attr,
1714                                                           GFP_KERNEL);
1715                        if (IS_ERR(slave)) {
1716                                ret = PTR_ERR(slave);
1717                                goto out_av;
1718                        }
1719                        attr->xmit_slave = slave;
1720                }
1721        }
1722        if (attr_mask & IB_QP_ALT_PATH) {
1723                /*
1724                 * FIXME: This does not track the migration state, so if the
1725                 * user loads a new alternate path after the HW has migrated
1726                 * from primary->alternate we will keep the wrong
1727                 * references. This is OK for IB because the reference
1728                 * counting does not serve any functional purpose.
1729                 */
1730                ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
1731                                          &old_sgid_attr_alt_av);
1732                if (ret)
1733                        goto out_av;
1734
1735                /*
1736                 * Today the core code can only handle alternate paths and APM
1737                 * for IB. Ban them in roce mode.
1738                 */
1739                if (!(rdma_protocol_ib(qp->device,
1740                                       attr->alt_ah_attr.port_num) &&
1741                      rdma_protocol_ib(qp->device, port))) {
1742                        ret = -EINVAL;
1743                        goto out;
1744                }
1745        }
1746
1747        if (rdma_ib_or_roce(qp->device, port)) {
1748                if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
1749                        dev_warn(&qp->device->dev,
1750                                 "%s rq_psn overflow, masking to 24 bits\n",
1751                                 __func__);
1752                        attr->rq_psn &= 0xffffff;
1753                }
1754
1755                if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
1756                        dev_warn(&qp->device->dev,
1757                                 "%s sq_psn overflow, masking to 24 bits\n",
1758                                 __func__);
1759                        attr->sq_psn &= 0xffffff;
1760                }
1761        }
1762
1763        /*
1764         * Bind this qp to a counter automatically based on the rdma counter
1765         * rules. This is only done during RST2INIT when a port is specified.
1766         */
1767        if (!qp->counter && (attr_mask & IB_QP_PORT) &&
1768            ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
1769                rdma_counter_bind_qp_auto(qp, attr->port_num);
1770
1771        ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1772        if (ret)
1773                goto out;
1774
1775        if (attr_mask & IB_QP_PORT)
1776                qp->port = attr->port_num;
1777        if (attr_mask & IB_QP_AV)
1778                qp->av_sgid_attr =
1779                        rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
1780        if (attr_mask & IB_QP_ALT_PATH)
1781                qp->alt_path_sgid_attr = rdma_update_sgid_attr(
1782                        &attr->alt_ah_attr, qp->alt_path_sgid_attr);
1783
1784out:
1785        if (attr_mask & IB_QP_ALT_PATH)
1786                rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
1787out_av:
1788        if (attr_mask & IB_QP_AV) {
1789                rdma_lag_put_ah_roce_slave(attr->xmit_slave);
1790                rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1791        }
1792        return ret;
1793}
1794
1795/**
1796 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1797 * @ib_qp: The QP to modify.
1798 * @attr: On input, specifies the QP attributes to modify.  On output,
1799 *   the current values of selected QP attributes are returned.
1800 * @attr_mask: A bit-mask used to specify which attributes of the QP
1801 *   are being modified.
1802 * @udata: pointer to the user's input/output buffer information
1803 *   (valid user data or NULL for a kernel object).
1804 * It returns 0 on success or an appropriate error code on failure.
1805 */
1806int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1807                            int attr_mask, struct ib_udata *udata)
1808{
1809        return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1810}
1811EXPORT_SYMBOL(ib_modify_qp_with_udata);
1812
1813int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
1814{
1815        int rc;
1816        u32 netdev_speed;
1817        struct net_device *netdev;
1818        struct ethtool_link_ksettings lksettings;
1819
1820        if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1821                return -EINVAL;
1822
1823        netdev = ib_device_get_netdev(dev, port_num);
1824        if (!netdev)
1825                return -ENODEV;
1826
1827        rtnl_lock();
1828        rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1829        rtnl_unlock();
1830
1831        if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
1832                netdev_speed = lksettings.base.speed;
1833        } else {
1834                netdev_speed = SPEED_1000;
1835                pr_warn("%s speed is unknown, defaulting to %u\n", netdev->name,
1836                        netdev_speed);
1837        }
1838        /* netdev->name was used above; only now is it safe to drop the ref */
1839        dev_put(netdev);
1840
1841        if (netdev_speed <= SPEED_1000) {
1842                *width = IB_WIDTH_1X;
1843                *speed = IB_SPEED_SDR;
1844        } else if (netdev_speed <= SPEED_10000) {
1845                *width = IB_WIDTH_1X;
1846                *speed = IB_SPEED_FDR10;
1847        } else if (netdev_speed <= SPEED_20000) {
1848                *width = IB_WIDTH_4X;
1849                *speed = IB_SPEED_DDR;
1850        } else if (netdev_speed <= SPEED_25000) {
1851                *width = IB_WIDTH_1X;
1852                *speed = IB_SPEED_EDR;
1853        } else if (netdev_speed <= SPEED_40000) {
1854                *width = IB_WIDTH_4X;
1855                *speed = IB_SPEED_FDR10;
1856        } else {
1857                *width = IB_WIDTH_4X;
1858                *speed = IB_SPEED_EDR;
1859        }
1860
1861        return 0;
1862}
1863EXPORT_SYMBOL(ib_get_eth_speed);
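
/*
 * Usage sketch (illustrative): a RoCE driver's query_port() handler can
 * derive the active speed/width attributes from the bound netdev. "props"
 * here stands for a hypothetical struct ib_port_attr being filled in.
 *
 *	u16 speed;
 *	u8 width;
 *
 *	if (!ib_get_eth_speed(dev, port_num, &speed, &width)) {
 *		props->active_speed = speed;
 *		props->active_width = width;
 *	}
 */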
1864
1865int ib_modify_qp(struct ib_qp *qp,
1866                 struct ib_qp_attr *qp_attr,
1867                 int qp_attr_mask)
1868{
1869        return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1870}
1871EXPORT_SYMBOL(ib_modify_qp);
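
/*
 * Usage sketch (illustrative): kernel ULPs commonly move a QP to the
 * error state during teardown so outstanding work requests are flushed
 * back with IB_WC_WR_FLUSH_ERR, exactly as __ib_drain_sq() below does:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *
 *	ib_modify_qp(qp, &attr, IB_QP_STATE);
 */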
1872
1873int ib_query_qp(struct ib_qp *qp,
1874                struct ib_qp_attr *qp_attr,
1875                int qp_attr_mask,
1876                struct ib_qp_init_attr *qp_init_attr)
1877{
1878        qp_attr->ah_attr.grh.sgid_attr = NULL;
1879        qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
1880
1881        return qp->device->ops.query_qp ?
1882                qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
1883                                         qp_init_attr) : -EOPNOTSUPP;
1884}
1885EXPORT_SYMBOL(ib_query_qp);
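
/*
 * Usage sketch (illustrative): reading back the current state of a QP,
 * the same pattern is_valid_mcast_lid() below uses to pick the port to
 * validate:
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	if (!ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr))
 *		pr_debug("qp is in state %d\n", attr.qp_state);
 */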
1886
1887int ib_close_qp(struct ib_qp *qp)
1888{
1889        struct ib_qp *real_qp;
1890        unsigned long flags;
1891
1892        real_qp = qp->real_qp;
1893        if (real_qp == qp)
1894                return -EINVAL;
1895
1896        spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1897        list_del(&qp->open_list);
1898        spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1899
1900        atomic_dec(&real_qp->usecnt);
1901        if (qp->qp_sec)
1902                ib_close_shared_qp_security(qp->qp_sec);
1903        kfree(qp);
1904
1905        return 0;
1906}
1907EXPORT_SYMBOL(ib_close_qp);
1908
1909static int __ib_destroy_shared_qp(struct ib_qp *qp)
1910{
1911        struct ib_xrcd *xrcd;
1912        struct ib_qp *real_qp;
1913        int ret;
1914
1915        real_qp = qp->real_qp;
1916        xrcd = real_qp->xrcd;
1917        down_write(&xrcd->tgt_qps_rwsem);
1918        ib_close_qp(qp);
1919        if (atomic_read(&real_qp->usecnt) == 0)
1920                xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
1921        else
1922                real_qp = NULL;
1923        up_write(&xrcd->tgt_qps_rwsem);
1924
1925        if (real_qp) {
1926                ret = ib_destroy_qp(real_qp);
1927                if (!ret)
1928                        atomic_dec(&xrcd->usecnt);
1929        }
1930
1931        return 0;
1932}
1933
1934int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
1935{
1936        const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
1937        const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
1938        struct ib_pd *pd;
1939        struct ib_cq *scq, *rcq;
1940        struct ib_srq *srq;
1941        struct ib_rwq_ind_table *ind_tbl;
1942        struct ib_qp_security *sec;
1943        int ret;
1944
1945        WARN_ON_ONCE(qp->mrs_used > 0);
1946
1947        if (atomic_read(&qp->usecnt))
1948                return -EBUSY;
1949
1950        if (qp->real_qp != qp)
1951                return __ib_destroy_shared_qp(qp);
1952
1953        pd   = qp->pd;
1954        scq  = qp->send_cq;
1955        rcq  = qp->recv_cq;
1956        srq  = qp->srq;
1957        ind_tbl = qp->rwq_ind_tbl;
1958        sec  = qp->qp_sec;
1959        if (sec)
1960                ib_destroy_qp_security_begin(sec);
1961
1962        if (!qp->uobject)
1963                rdma_rw_cleanup_mrs(qp);
1964
1965        rdma_counter_unbind_qp(qp, true);
1966        rdma_restrack_del(&qp->res);
1967        ret = qp->device->ops.destroy_qp(qp, udata);
1968        if (!ret) {
1969                if (alt_path_sgid_attr)
1970                        rdma_put_gid_attr(alt_path_sgid_attr);
1971                if (av_sgid_attr)
1972                        rdma_put_gid_attr(av_sgid_attr);
1973                if (pd)
1974                        atomic_dec(&pd->usecnt);
1975                if (scq)
1976                        atomic_dec(&scq->usecnt);
1977                if (rcq)
1978                        atomic_dec(&rcq->usecnt);
1979                if (srq)
1980                        atomic_dec(&srq->usecnt);
1981                if (ind_tbl)
1982                        atomic_dec(&ind_tbl->usecnt);
1983                if (sec)
1984                        ib_destroy_qp_security_end(sec);
1985        } else {
1986                if (sec)
1987                        ib_destroy_qp_security_abort(sec);
1988        }
1989
1990        return ret;
1991}
1992EXPORT_SYMBOL(ib_destroy_qp_user);
1993
1994/* Completion queues */
1995
1996struct ib_cq *__ib_create_cq(struct ib_device *device,
1997                             ib_comp_handler comp_handler,
1998                             void (*event_handler)(struct ib_event *, void *),
1999                             void *cq_context,
2000                             const struct ib_cq_init_attr *cq_attr,
2001                             const char *caller)
2002{
2003        struct ib_cq *cq;
2004        int ret;
2005
2006        cq = rdma_zalloc_drv_obj(device, ib_cq);
2007        if (!cq)
2008                return ERR_PTR(-ENOMEM);
2009
2010        cq->device = device;
2011        cq->uobject = NULL;
2012        cq->comp_handler = comp_handler;
2013        cq->event_handler = event_handler;
2014        cq->cq_context = cq_context;
2015        atomic_set(&cq->usecnt, 0);
2016
2017        rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
2018        rdma_restrack_set_name(&cq->res, caller);
2019
2020        ret = device->ops.create_cq(cq, cq_attr, NULL);
2021        if (ret) {
2022                rdma_restrack_put(&cq->res);
2023                kfree(cq);
2024                return ERR_PTR(ret);
2025        }
2026
2027        rdma_restrack_add(&cq->res);
2028        return cq;
2029}
2030EXPORT_SYMBOL(__ib_create_cq);
2031
2032int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2033{
2034        if (cq->shared)
2035                return -EOPNOTSUPP;
2036
2037        return cq->device->ops.modify_cq ?
2038                cq->device->ops.modify_cq(cq, cq_count,
2039                                          cq_period) : -EOPNOTSUPP;
2040}
2041EXPORT_SYMBOL(rdma_set_cq_moderation);
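
/*
 * Usage sketch (illustrative): ask the device to coalesce completion
 * events, e.g. fire after 16 CQEs or after a 64 usec period. The exact
 * interpretation of count/period is device-specific, and the values here
 * are hypothetical.
 *
 *	rdma_set_cq_moderation(cq, 16, 64);
 */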
2042
2043int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
2044{
2045        int ret;
2046
2047        if (WARN_ON_ONCE(cq->shared))
2048                return -EOPNOTSUPP;
2049
2050        if (atomic_read(&cq->usecnt))
2051                return -EBUSY;
2052
2053        ret = cq->device->ops.destroy_cq(cq, udata);
2054        if (ret)
2055                return ret;
2056
2057        rdma_restrack_del(&cq->res);
2058        kfree(cq);
2059        return ret;
2060}
2061EXPORT_SYMBOL(ib_destroy_cq_user);
2062
2063int ib_resize_cq(struct ib_cq *cq, int cqe)
2064{
2065        if (cq->shared)
2066                return -EOPNOTSUPP;
2067
2068        return cq->device->ops.resize_cq ?
2069                cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
2070}
2071EXPORT_SYMBOL(ib_resize_cq);
2072
2073/* Memory regions */
2074
2075struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2076                             u64 virt_addr, int access_flags)
2077{
2078        struct ib_mr *mr;
2079
2080        if (access_flags & IB_ACCESS_ON_DEMAND) {
2081                if (!(pd->device->attrs.device_cap_flags &
2082                      IB_DEVICE_ON_DEMAND_PAGING)) {
2083                        pr_debug("ODP support not available\n");
2084                        return ERR_PTR(-EINVAL);
2085                }
2086        }
2087
2088        mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
2089                                         access_flags, NULL);
2090
2091        if (IS_ERR(mr))
2092                return mr;
2093
2094        mr->device = pd->device;
2095        mr->pd = pd;
2096        mr->dm = NULL;
2097        atomic_inc(&pd->usecnt);
2098
2099        rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2100        rdma_restrack_parent_name(&mr->res, &pd->res);
2101        rdma_restrack_add(&mr->res);
2102
2103        return mr;
2104}
2105EXPORT_SYMBOL(ib_reg_user_mr);
2106
2107int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
2108                 u32 flags, struct ib_sge *sg_list, u32 num_sge)
2109{
2110        if (!pd->device->ops.advise_mr)
2111                return -EOPNOTSUPP;
2112
2113        if (!num_sge)
2114                return 0;
2115
2116        return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
2117                                         NULL);
2118}
2119EXPORT_SYMBOL(ib_advise_mr);
2120
2121int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
2122{
2123        struct ib_pd *pd = mr->pd;
2124        struct ib_dm *dm = mr->dm;
2125        struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
2126        int ret;
2127
2128        trace_mr_dereg(mr);
2129        rdma_restrack_del(&mr->res);
2130        ret = mr->device->ops.dereg_mr(mr, udata);
2131        if (!ret) {
2132                atomic_dec(&pd->usecnt);
2133                if (dm)
2134                        atomic_dec(&dm->usecnt);
2135                kfree(sig_attrs);
2136        }
2137
2138        return ret;
2139}
2140EXPORT_SYMBOL(ib_dereg_mr_user);
2141
2142/**
2143 * ib_alloc_mr() - Allocates a memory region
2144 * @pd:            protection domain associated with the region
2145 * @mr_type:       memory region type
2146 * @max_num_sg:    maximum sg entries available for registration.
2147 *
2148 * Notes:
2149 * Memory registration page/sg lists must not exceed max_num_sg.
2150 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
2151 * max_num_sg * used_page_size.
2152 *
2153 */
2154struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2155                          u32 max_num_sg)
2156{
2157        struct ib_mr *mr;
2158
2159        if (!pd->device->ops.alloc_mr) {
2160                mr = ERR_PTR(-EOPNOTSUPP);
2161                goto out;
2162        }
2163
2164        if (mr_type == IB_MR_TYPE_INTEGRITY) {
2165                WARN_ON_ONCE(1);
2166                mr = ERR_PTR(-EINVAL);
2167                goto out;
2168        }
2169
2170        mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
2171        if (IS_ERR(mr))
2172                goto out;
2173
2174        mr->device = pd->device;
2175        mr->pd = pd;
2176        mr->dm = NULL;
2177        mr->uobject = NULL;
2178        atomic_inc(&pd->usecnt);
2179        mr->need_inval = false;
2180        mr->type = mr_type;
2181        mr->sig_attrs = NULL;
2182
2183        rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2184        rdma_restrack_parent_name(&mr->res, &pd->res);
2185        rdma_restrack_add(&mr->res);
2186out:
2187        trace_mr_alloc(pd, mr_type, max_num_sg, mr);
2188        return mr;
2189}
2190EXPORT_SYMBOL(ib_alloc_mr);
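
/*
 * Usage sketch (illustrative): allocating a fast-registration MR able to
 * map up to 256 pages, as a ULP might do per RDMA context. The size is
 * hypothetical.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 256);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */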
2191
2192/**
2193 * ib_alloc_mr_integrity() - Allocates an integrity memory region
2194 * @pd:                      protection domain associated with the region
2195 * @max_num_data_sg:         maximum data sg entries available for registration
2196 * @max_num_meta_sg:         maximum metadata sg entries available for
2197 *                           registration
2198 *
2199 * Notes:
2200 * Memory registration page/sg lists must not exceed max_num_sg,
2201 * also the integrity page/sg lists must not exceed max_num_meta_sg.
2202 *
2203 */
2204struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
2205                                    u32 max_num_data_sg,
2206                                    u32 max_num_meta_sg)
2207{
2208        struct ib_mr *mr;
2209        struct ib_sig_attrs *sig_attrs;
2210
2211        if (!pd->device->ops.alloc_mr_integrity ||
2212            !pd->device->ops.map_mr_sg_pi) {
2213                mr = ERR_PTR(-EOPNOTSUPP);
2214                goto out;
2215        }
2216
2217        if (!max_num_meta_sg) {
2218                mr = ERR_PTR(-EINVAL);
2219                goto out;
2220        }
2221
2222        sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
2223        if (!sig_attrs) {
2224                mr = ERR_PTR(-ENOMEM);
2225                goto out;
2226        }
2227
2228        mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
2229                                                max_num_meta_sg);
2230        if (IS_ERR(mr)) {
2231                kfree(sig_attrs);
2232                goto out;
2233        }
2234
2235        mr->device = pd->device;
2236        mr->pd = pd;
2237        mr->dm = NULL;
2238        mr->uobject = NULL;
2239        atomic_inc(&pd->usecnt);
2240        mr->need_inval = false;
2241        mr->type = IB_MR_TYPE_INTEGRITY;
2242        mr->sig_attrs = sig_attrs;
2243
2244        rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2245        rdma_restrack_parent_name(&mr->res, &pd->res);
2246        rdma_restrack_add(&mr->res);
2247out:
2248        trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
2249        return mr;
2250}
2251EXPORT_SYMBOL(ib_alloc_mr_integrity);
2252
2253/* Multicast groups */
2254
2255static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
2256{
2257        struct ib_qp_init_attr init_attr = {};
2258        struct ib_qp_attr attr = {};
2259        int num_eth_ports = 0;
2260        unsigned int port;
2261
2262        /* If QP state >= init, it is assigned to a port and we can check this
2263         * port only.
2264         */
2265        if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
2266                if (attr.qp_state >= IB_QPS_INIT) {
2267                        if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
2268                            IB_LINK_LAYER_INFINIBAND)
2269                                return true;
2270                        goto lid_check;
2271                }
2272        }
2273
2274        /* Can't get a quick answer, iterate over all ports */
2275        rdma_for_each_port(qp->device, port)
2276                if (rdma_port_get_link_layer(qp->device, port) !=
2277                    IB_LINK_LAYER_INFINIBAND)
2278                        num_eth_ports++;
2279
2280        /* If we have at least one Ethernet port, the RoCE annex declares that
2281         * multicast LID should be ignored. We can't tell at this step if the
2282         * QP belongs to an IB or Ethernet port.
2283         */
2284        if (num_eth_ports)
2285                return true;
2286
2287        /* If all the ports are IB, we can check according to IB spec. */
2288lid_check:
2289        return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
2290                 lid == be16_to_cpu(IB_LID_PERMISSIVE));
2291}
2292
2293int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2294{
2295        int ret;
2296
2297        if (!qp->device->ops.attach_mcast)
2298                return -EOPNOTSUPP;
2299
2300        if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2301            qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2302                return -EINVAL;
2303
2304        ret = qp->device->ops.attach_mcast(qp, gid, lid);
2305        if (!ret)
2306                atomic_inc(&qp->usecnt);
2307        return ret;
2308}
2309EXPORT_SYMBOL(ib_attach_mcast);
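
/*
 * Usage sketch (illustrative): attaching a UD QP to a multicast group.
 * The mgid and mlid would normally come from an SA multicast join; the
 * values below are placeholders only.
 *
 *	union ib_gid mgid;	(filled in from the join response)
 *	u16 mlid = be16_to_cpu(IB_MULTICAST_LID_BASE);
 *
 *	int ret = ib_attach_mcast(qp, &mgid, mlid);
 */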
2310
2311int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2312{
2313        int ret;
2314
2315        if (!qp->device->ops.detach_mcast)
2316                return -EOPNOTSUPP;
2317
2318        if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2319            qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2320                return -EINVAL;
2321
2322        ret = qp->device->ops.detach_mcast(qp, gid, lid);
2323        if (!ret)
2324                atomic_dec(&qp->usecnt);
2325        return ret;
2326}
2327EXPORT_SYMBOL(ib_detach_mcast);
2328
2329/**
2330 * ib_alloc_xrcd_user - Allocates an XRC domain.
2331 * @device: The device on which to allocate the XRC domain.
2332 * @inode: inode to connect XRCD
2333 * @udata: Valid user data or NULL for kernel object
2334 */
2335struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
2336                                   struct inode *inode, struct ib_udata *udata)
2337{
2338        struct ib_xrcd *xrcd;
2339        int ret;
2340
2341        if (!device->ops.alloc_xrcd)
2342                return ERR_PTR(-EOPNOTSUPP);
2343
2344        xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
2345        if (!xrcd)
2346                return ERR_PTR(-ENOMEM);
2347
2348        xrcd->device = device;
2349        xrcd->inode = inode;
2350        atomic_set(&xrcd->usecnt, 0);
2351        init_rwsem(&xrcd->tgt_qps_rwsem);
2352        xa_init(&xrcd->tgt_qps);
2353
2354        ret = device->ops.alloc_xrcd(xrcd, udata);
2355        if (ret)
2356                goto err;
2357        return xrcd;
2358err:
2359        kfree(xrcd);
2360        return ERR_PTR(ret);
2361}
2362EXPORT_SYMBOL(ib_alloc_xrcd_user);
2363
2364/**
2365 * ib_dealloc_xrcd_user - Deallocates an XRC domain.
2366 * @xrcd: The XRC domain to deallocate.
2367 * @udata: Valid user data or NULL for kernel object
2368 */
2369int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
2370{
2371        int ret;
2372
2373        if (atomic_read(&xrcd->usecnt))
2374                return -EBUSY;
2375
2376        WARN_ON(!xa_empty(&xrcd->tgt_qps));
2377        ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
2378        if (ret)
2379                return ret;
2380        kfree(xrcd);
2381        return ret;
2382}
2383EXPORT_SYMBOL(ib_dealloc_xrcd_user);
2384
2385/**
2386 * ib_create_wq - Creates a WQ associated with the specified protection
2387 * domain.
2388 * @pd: The protection domain associated with the WQ.
2389 * @wq_attr: A list of initial attributes required to create the
2390 * WQ. If WQ creation succeeds, then the attributes are updated to
2391 * the actual capabilities of the created WQ.
2392 *
2393 * wq_attr->max_wr and wq_attr->max_sge determine
2394 * the requested size of the WQ, and are set to the actual values allocated
2395 * on return.
2396 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
2397 * at least as large as the requested values.
2398 */
2399struct ib_wq *ib_create_wq(struct ib_pd *pd,
2400                           struct ib_wq_init_attr *wq_attr)
2401{
2402        struct ib_wq *wq;
2403
2404        if (!pd->device->ops.create_wq)
2405                return ERR_PTR(-EOPNOTSUPP);
2406
2407        wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
2408        if (!IS_ERR(wq)) {
2409                wq->event_handler = wq_attr->event_handler;
2410                wq->wq_context = wq_attr->wq_context;
2411                wq->wq_type = wq_attr->wq_type;
2412                wq->cq = wq_attr->cq;
2413                wq->device = pd->device;
2414                wq->pd = pd;
2415                wq->uobject = NULL;
2416                atomic_inc(&pd->usecnt);
2417                atomic_inc(&wq_attr->cq->usecnt);
2418                atomic_set(&wq->usecnt, 0);
2419        }
2420        return wq;
2421}
2422EXPORT_SYMBOL(ib_create_wq);
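
/*
 * Usage sketch (illustrative): creating a receive WQ, for instance for
 * later use in an RSS indirection table. The queue sizes are hypothetical.
 *
 *	struct ib_wq_init_attr wq_attr = {
 *		.wq_type = IB_WQT_RQ,
 *		.max_wr	 = 256,
 *		.max_sge = 1,
 *		.cq	 = cq,
 *	};
 *	struct ib_wq *wq = ib_create_wq(pd, &wq_attr);
 *
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 */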
2423
2424/**
2425 * ib_destroy_wq_user - Destroys the specified user WQ.
2426 * @wq: The WQ to destroy.
2427 * @udata: Valid user data
2428 */
2429int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
2430{
2431        struct ib_cq *cq = wq->cq;
2432        struct ib_pd *pd = wq->pd;
2433        int ret;
2434
2435        if (atomic_read(&wq->usecnt))
2436                return -EBUSY;
2437
2438        ret = wq->device->ops.destroy_wq(wq, udata);
2439        if (ret)
2440                return ret;
2441
2442        atomic_dec(&pd->usecnt);
2443        atomic_dec(&cq->usecnt);
2444        return ret;
2445}
2446EXPORT_SYMBOL(ib_destroy_wq_user);
2447
2448int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2449                       struct ib_mr_status *mr_status)
2450{
2451        if (!mr->device->ops.check_mr_status)
2452                return -EOPNOTSUPP;
2453
2454        return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
2455}
2456EXPORT_SYMBOL(ib_check_mr_status);
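
/*
 * Usage sketch (illustrative): after an I/O through an integrity MR
 * completes, a ULP can check whether the device detected a signature
 * error:
 *
 *	struct ib_mr_status status;
 *
 *	if (!ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &status) &&
 *	    (status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		pr_err("sig error at offset %llu\n",
 *		       status.sig_err.sig_err_offset);
 */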
2457
2458int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
2459                         int state)
2460{
2461        if (!device->ops.set_vf_link_state)
2462                return -EOPNOTSUPP;
2463
2464        return device->ops.set_vf_link_state(device, vf, port, state);
2465}
2466EXPORT_SYMBOL(ib_set_vf_link_state);
2467
2468int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
2469                     struct ifla_vf_info *info)
2470{
2471        if (!device->ops.get_vf_config)
2472                return -EOPNOTSUPP;
2473
2474        return device->ops.get_vf_config(device, vf, port, info);
2475}
2476EXPORT_SYMBOL(ib_get_vf_config);
2477
2478int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
2479                    struct ifla_vf_stats *stats)
2480{
2481        if (!device->ops.get_vf_stats)
2482                return -EOPNOTSUPP;
2483
2484        return device->ops.get_vf_stats(device, vf, port, stats);
2485}
2486EXPORT_SYMBOL(ib_get_vf_stats);
2487
2488int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
2489                   int type)
2490{
2491        if (!device->ops.set_vf_guid)
2492                return -EOPNOTSUPP;
2493
2494        return device->ops.set_vf_guid(device, vf, port, guid, type);
2495}
2496EXPORT_SYMBOL(ib_set_vf_guid);
2497
2498int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
2499                   struct ifla_vf_guid *node_guid,
2500                   struct ifla_vf_guid *port_guid)
2501{
2502        if (!device->ops.get_vf_guid)
2503                return -EOPNOTSUPP;
2504
2505        return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
2506}
2507EXPORT_SYMBOL(ib_get_vf_guid);

2508/**
2509 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
2510 *     information) and set an appropriate memory region for registration.
2511 * @mr:             memory region
2512 * @data_sg:        dma mapped scatterlist for data
2513 * @data_sg_nents:  number of entries in data_sg
2514 * @data_sg_offset: offset in bytes into data_sg
2515 * @meta_sg:        dma mapped scatterlist for metadata
2516 * @meta_sg_nents:  number of entries in meta_sg
2517 * @meta_sg_offset: offset in bytes into meta_sg
2518 * @page_size:      page vector desired page size
2519 *
2520 * Constraints:
2521 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
2522 *
2523 * Return: 0 on success.
2524 *
2525 * After this completes successfully, the memory region
2526 * is ready for registration.
2527 */
2528int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
2529                    int data_sg_nents, unsigned int *data_sg_offset,
2530                    struct scatterlist *meta_sg, int meta_sg_nents,
2531                    unsigned int *meta_sg_offset, unsigned int page_size)
2532{
2533        if (unlikely(!mr->device->ops.map_mr_sg_pi ||
2534                     WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
2535                return -EOPNOTSUPP;
2536
2537        mr->page_size = page_size;
2538
2539        return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
2540                                            data_sg_offset, meta_sg,
2541                                            meta_sg_nents, meta_sg_offset);
2542}
2543EXPORT_SYMBOL(ib_map_mr_sg_pi);
2544
2545/**
2546 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
2547 *     and set it as the mapping of the memory region.
2548 * @mr:            memory region
2549 * @sg:            dma mapped scatterlist
2550 * @sg_nents:      number of entries in sg
2551 * @sg_offset:     offset in bytes into sg
2552 * @page_size:     page vector desired page size
2553 *
2554 * Constraints:
2555 *
2556 * - The first sg element is allowed to have an offset.
2557 * - Each sg element must either be aligned to page_size or virtually
2558 *   contiguous to the previous element. In case an sg element has a
2559 *   non-contiguous offset, the mapping prefix will not include it.
2560 * - The last sg element is allowed to have length less than page_size.
2561 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
2562 *   then only max_num_sg entries will be mapped.
2563 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2564 *   constraints holds and the page_size argument is ignored.
2565 *
2566 * Returns the number of sg elements that were mapped to the memory region.
2567 *
2568 * After this completes successfully, the memory region
2569 * is ready for registration.
2570 */
2571int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2572                 unsigned int *sg_offset, unsigned int page_size)
2573{
2574        if (unlikely(!mr->device->ops.map_mr_sg))
2575                return -EOPNOTSUPP;
2576
2577        mr->page_size = page_size;
2578
2579        return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
2580}
2581EXPORT_SYMBOL(ib_map_mr_sg);
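
/*
 * Usage sketch (illustrative): mapping a DMA-mapped scatterlist onto an
 * IB_MR_TYPE_MEM_REG MR and then registering it with an IB_WR_REG_MR work
 * request. Error handling is elided; qp/sg/sg_nents come from the caller.
 *
 *	int n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	struct ib_reg_wr reg_wr = {
 *		.wr.opcode	= IB_WR_REG_MR,
 *		.mr		= mr,
 *		.key		= mr->lkey,
 *		.access		= IB_ACCESS_LOCAL_WRITE,
 *	};
 *
 *	if (n == sg_nents)
 *		ib_post_send(qp, &reg_wr.wr, NULL);
 */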
2582
2583/**
2584 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2585 *     to a page vector
2586 * @mr:            memory region
2587 * @sgl:           dma mapped scatterlist
2588 * @sg_nents:      number of entries in sg
2589 * @sg_offset_p:   ==== =======================================================
2590 *                 IN   start offset in bytes into sg
2591 *                 OUT  offset in bytes for element n of the sg of the first
2592 *                      byte that has not been processed where n is the return
2593 *                      value of this function.
2594 *                 ==== =======================================================
2595 * @set_page:      driver page assignment function pointer
2596 *
2597 * Core service helper for drivers to convert the largest
2598 * prefix of the given sg list to a page vector. The sg list
2599 * prefix converted is the prefix that meets the requirements
2600 * of ib_map_mr_sg.
2601 *
2602 * Returns the number of sg elements that were assigned to
2603 * a page vector.
2604 */
2605int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
2606                unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
2607{
2608        struct scatterlist *sg;
2609        u64 last_end_dma_addr = 0;
2610        unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2611        unsigned int last_page_off = 0;
2612        u64 page_mask = ~((u64)mr->page_size - 1);
2613        int i, ret;
2614
2615        if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2616                return -EINVAL;
2617
2618        mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
2619        mr->length = 0;
2620
2621        for_each_sg(sgl, sg, sg_nents, i) {
2622                u64 dma_addr = sg_dma_address(sg) + sg_offset;
2623                u64 prev_addr = dma_addr;
2624                unsigned int dma_len = sg_dma_len(sg) - sg_offset;
2625                u64 end_dma_addr = dma_addr + dma_len;
2626                u64 page_addr = dma_addr & page_mask;
2627
2628                /*
2629                 * For the second and later elements, check whether either the
2630                 * end of element i-1 or the start of element i is not aligned
2631                 * on a page boundary.
2632                 */
2633                if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2634                        /* Stop mapping if there is a gap. */
2635                        if (last_end_dma_addr != dma_addr)
2636                                break;
2637
2638                        /*
2639                         * Coalesce this element with the last. If it is small
2640                         * enough just update mr->length. Otherwise start
2641                         * mapping from the next page.
2642                         */
2643                        goto next_page;
2644                }
2645
2646                do {
2647                        ret = set_page(mr, page_addr);
2648                        if (unlikely(ret < 0)) {
2649                                sg_offset = prev_addr - sg_dma_address(sg);
2650                                mr->length += prev_addr - dma_addr;
2651                                if (sg_offset_p)
2652                                        *sg_offset_p = sg_offset;
2653                                return i || sg_offset ? i : ret;
2654                        }
2655                        prev_addr = page_addr;
2656next_page:
2657                        page_addr += mr->page_size;
2658                } while (page_addr < end_dma_addr);
2659
2660                mr->length += dma_len;
2661                last_end_dma_addr = end_dma_addr;
2662                last_page_off = end_dma_addr & ~page_mask;
2663
2664                sg_offset = 0;
2665        }
2666
2667        if (sg_offset_p)
2668                *sg_offset_p = 0;
2669        return i;
2670}
2671EXPORT_SYMBOL(ib_sg_to_pages);
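
/*
 * Usage sketch (illustrative): a driver's map_mr_sg handler typically
 * wraps ib_sg_to_pages() with a set_page callback that appends each page
 * address to its private page list. "struct my_mr", "to_my_mr()" and the
 * npages/max_pages/pages fields are hypothetical driver types.
 *
 *	static int my_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (unlikely(mr->npages == mr->max_pages))
 *			return -ENOMEM;
 *
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 *				int sg_nents, unsigned int *sg_offset)
 *	{
 *		to_my_mr(ibmr)->npages = 0;
 *		return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
 *				      my_set_page);
 *	}
 */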
2672
2673struct ib_drain_cqe {
2674        struct ib_cqe cqe;
2675        struct completion done;
2676};
2677
2678static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2679{
2680        struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2681                                                cqe);
2682
2683        complete(&cqe->done);
2684}
2685
2686/*
2687 * Post a WR and block until its completion is reaped for the SQ.
2688 */
2689static void __ib_drain_sq(struct ib_qp *qp)
2690{
2691        struct ib_cq *cq = qp->send_cq;
2692        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2693        struct ib_drain_cqe sdrain;
2694        struct ib_rdma_wr swr = {
2695                .wr = {
2696                        .next = NULL,
2697                        { .wr_cqe       = &sdrain.cqe, },
2698                        .opcode = IB_WR_RDMA_WRITE,
2699                },
2700        };
2701        int ret;
2702
2703        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2704        if (ret) {
2705                WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2706                return;
2707        }
2708
2709        sdrain.cqe.done = ib_drain_qp_done;
2710        init_completion(&sdrain.done);
2711
2712        ret = ib_post_send(qp, &swr.wr, NULL);
2713        if (ret) {
2714                WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2715                return;
2716        }
2717
2718        if (cq->poll_ctx == IB_POLL_DIRECT)
2719                while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2720                        ib_process_cq_direct(cq, -1);
2721        else
2722                wait_for_completion(&sdrain.done);
2723}
2724
2725/*
2726 * Post a marker WR on the RQ and block until its completion is reaped.
2727 */
2728static void __ib_drain_rq(struct ib_qp *qp)
2729{
2730        struct ib_cq *cq = qp->recv_cq;
2731        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2732        struct ib_drain_cqe rdrain;
2733        struct ib_recv_wr rwr = {};
2734        int ret;
2735
2736        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2737        if (ret) {
2738                WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2739                return;
2740        }
2741
2742        rwr.wr_cqe = &rdrain.cqe;
2743        rdrain.cqe.done = ib_drain_qp_done;
2744        init_completion(&rdrain.done);
2745
2746        ret = ib_post_recv(qp, &rwr, NULL);
2747        if (ret) {
2748                WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2749                return;
2750        }
2751
2752        if (cq->poll_ctx == IB_POLL_DIRECT)
2753                while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2754                        ib_process_cq_direct(cq, -1);
2755        else
2756                wait_for_completion(&rdrain.done);
2757}
2758
2759/**
2760 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2761 *                 application.
2762 * @qp:            queue pair to drain
2763 *
2764 * If the device has a provider-specific drain function, then
2765 * call that.  Otherwise call the generic drain function
2766 * __ib_drain_sq().
2767 *
2768 * The caller must:
2769 *
2770 * ensure there is room in the CQ and SQ for the drain work request and
2771 * completion.
2772 *
2773 * allocate the CQ using ib_alloc_cq().
2774 *
2775 * ensure that there are no other contexts that are posting WRs concurrently.
2776 * Otherwise the drain is not guaranteed.
2777 */
2778void ib_drain_sq(struct ib_qp *qp)
2779{
2780        if (qp->device->ops.drain_sq)
2781                qp->device->ops.drain_sq(qp);
2782        else
2783                __ib_drain_sq(qp);
2784        trace_cq_drain_complete(qp->send_cq);
2785}
2786EXPORT_SYMBOL(ib_drain_sq);
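/*
 * Example (illustrative sketch): to satisfy the requirements above, a ULP
 * allocates the send CQ via ib_alloc_cq() with one spare CQE so the drain
 * WR's flush completion always fits; the SQ depth needs the same spare
 * slot.  my_alloc_send_cq() and send_depth are hypothetical.
 */
static struct ib_cq *my_alloc_send_cq(struct ib_device *dev, void *priv,
				      int send_depth)
{
	/* send_depth WRs from the ULP plus one drain WR. */
	return ib_alloc_cq(dev, priv, send_depth + 1, 0, IB_POLL_SOFTIRQ);
}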
2787
2788/**
2789 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2790 *                 application.
2791 * @qp:            queue pair to drain
2792 *
2793 * If the device has a provider-specific drain function, then
2794 * call that.  Otherwise call the generic drain function
2795 * __ib_drain_rq().
2796 *
2797 * The caller must:
2798 *
2799 * ensure there is room in the CQ and RQ for the drain work request and
2800 * completion.
2801 *
2802 * allocate the CQ using ib_alloc_cq().
2803 *
2804 * ensure that there are no other contexts that are posting WRs concurrently.
2805 * Otherwise the drain is not guaranteed.
2806 */
2807void ib_drain_rq(struct ib_qp *qp)
2808{
2809        if (qp->device->ops.drain_rq)
2810                qp->device->ops.drain_rq(qp);
2811        else
2812                __ib_drain_rq(qp);
2813        trace_cq_drain_complete(qp->recv_cq);
2814}
2815EXPORT_SYMBOL(ib_drain_rq);
2816
2817/**
2818 * ib_drain_qp() - Block until all CQEs have been consumed by the
2819 *                 application on both the RQ and SQ.
2820 * @qp:            queue pair to drain
2821 *
2822 * The caller must:
2823 *
2824 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2825 * and completions.
2826 *
2827 * allocate the CQs using ib_alloc_cq().
2828 *
2829 * ensure that there are no other contexts that are posting WRs concurrently.
2830 * Otherwise the drain is not guaranteed.
2831 */
2832void ib_drain_qp(struct ib_qp *qp)
2833{
2834        ib_drain_sq(qp);
2835        if (!qp->srq)
2836                ib_drain_rq(qp);
2837}
2838EXPORT_SYMBOL(ib_drain_qp);
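/*
 * Example (illustrative sketch): typical ULP teardown quiesces the QP
 * before freeing anything the outstanding WRs might still reference.
 * struct my_conn is a hypothetical connection structure.
 */
struct my_conn {
	struct ib_qp	*qp;
	struct ib_cq	*cq;	/* allocated with ib_alloc_cq() */
};

static void my_conn_destroy(struct my_conn *conn)
{
	/* Flush all posted WRs and wait until their CQEs are reaped. */
	ib_drain_qp(conn->qp);
	ib_destroy_qp(conn->qp);
	ib_free_cq(conn->cq);
}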
2839
2840struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
2841                                     enum rdma_netdev_t type, const char *name,
2842                                     unsigned char name_assign_type,
2843                                     void (*setup)(struct net_device *))
2844{
2845        struct rdma_netdev_alloc_params params;
2846        struct net_device *netdev;
2847        int rc;
2848
2849        if (!device->ops.rdma_netdev_get_params)
2850                return ERR_PTR(-EOPNOTSUPP);
2851
2852        rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2853                                                &params);
2854        if (rc)
2855                return ERR_PTR(rc);
2856
2857        netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
2858                                  setup, params.txqs, params.rxqs);
2859        if (!netdev)
2860                return ERR_PTR(-ENOMEM);
2861
2862        return netdev;
2863}
2864EXPORT_SYMBOL(rdma_alloc_netdev);
2865
2866int rdma_init_netdev(struct ib_device *device, u32 port_num,
2867                     enum rdma_netdev_t type, const char *name,
2868                     unsigned char name_assign_type,
2869                     void (*setup)(struct net_device *),
2870                     struct net_device *netdev)
2871{
2872        struct rdma_netdev_alloc_params params;
2873        int rc;
2874
2875        if (!device->ops.rdma_netdev_get_params)
2876                return -EOPNOTSUPP;
2877
2878        rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2879                                                &params);
2880        if (rc)
2881                return rc;
2882
2883        return params.initialize_rdma_netdev(device, port_num,
2884                                             netdev, params.param);
2885}
2886EXPORT_SYMBOL(rdma_init_netdev);
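/*
 * Example (illustrative sketch): a hypothetical caller might pair the two
 * helpers above, allocating the netdev and then letting the HCA set up
 * its offloaded datapath.  my_setup() and my_create_netdev() are
 * hypothetical names.
 */
static void my_setup(struct net_device *ndev)
{
	/* net_device initialization would go here. */
}

static struct net_device *my_create_netdev(struct ib_device *hca, u32 port)
{
	struct net_device *ndev;
	int rc;

	ndev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, "ib%d",
				 NET_NAME_UNKNOWN, my_setup);
	if (IS_ERR(ndev))
		return ndev;

	rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, "ib%d",
			      NET_NAME_UNKNOWN, my_setup, ndev);
	if (rc) {
		free_netdev(ndev);
		return ERR_PTR(rc);
	}
	return ndev;
}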
2887
2888void __rdma_block_iter_start(struct ib_block_iter *biter,
2889                             struct scatterlist *sglist, unsigned int nents,
2890                             unsigned long pgsz)
2891{
2892        memset(biter, 0, sizeof(struct ib_block_iter));
2893        biter->__sg = sglist;
2894        biter->__sg_nents = nents;
2895
2896        /* Driver provides best block size to use */
2897        biter->__pg_bit = __fls(pgsz);
2898}
2899EXPORT_SYMBOL(__rdma_block_iter_start);
2900
2901bool __rdma_block_iter_next(struct ib_block_iter *biter)
2902{
2903        unsigned int block_offset;
2904
2905        if (!biter->__sg_nents || !biter->__sg)
2906                return false;
2907
2908        biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
2909        block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
2910        biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
2911
2912        if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
2913                biter->__sg_advance = 0;
2914                biter->__sg = sg_next(biter->__sg);
2915                biter->__sg_nents--;
2916        }
2917
2918        return true;
2919}
2920EXPORT_SYMBOL(__rdma_block_iter_next);
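/*
 * Example (illustrative sketch): drivers normally consume the two helpers
 * above through the rdma_for_each_block() macro from <rdma/ib_verbs.h>,
 * collecting one block-aligned DMA address per iteration.  my_fill_pas()
 * is a hypothetical name.
 */
static void my_fill_pas(struct scatterlist *sgl, unsigned int nents,
			unsigned long pgsz, u64 *pas)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_for_each_block(sgl, &biter, nents, pgsz)
		pas[i++] = rdma_block_iter_dma_address(&biter);
}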
2921