linux/include/rdma/ib_verbs.h
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

enum rdma_node_type {
        /* IB values map to NodeInfo:NodeType. */
        RDMA_NODE_IB_CA         = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum rdma_link_layer {
        IB_LINK_LAYER_UNSPECIFIED,
        IB_LINK_LAYER_INFINIBAND,
        IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR         = 1,
        IB_DEVICE_BAD_PKEY_CNTR         = (1<<1),
        IB_DEVICE_BAD_QKEY_CNTR         = (1<<2),
        IB_DEVICE_RAW_MULTI             = (1<<3),
        IB_DEVICE_AUTO_PATH_MIG         = (1<<4),
        IB_DEVICE_CHANGE_PHY_PORT       = (1<<5),
        IB_DEVICE_UD_AV_PORT_ENFORCE    = (1<<6),
        IB_DEVICE_CURR_QP_STATE_MOD     = (1<<7),
        IB_DEVICE_SHUTDOWN_PORT         = (1<<8),
        IB_DEVICE_INIT_TYPE             = (1<<9),
        IB_DEVICE_PORT_ACTIVE_EVENT     = (1<<10),
        IB_DEVICE_SYS_IMAGE_GUID        = (1<<11),
        IB_DEVICE_RC_RNR_NAK_GEN        = (1<<12),
        IB_DEVICE_SRQ_RESIZE            = (1<<13),
        IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
        IB_DEVICE_LOCAL_DMA_LKEY        = (1<<15),
        IB_DEVICE_RESERVED              = (1<<16), /* old SEND_W_INV */
        IB_DEVICE_MEM_WINDOW            = (1<<17),
        /*
         * Devices should set IB_DEVICE_UD_IP_CSUM if they support
         * insertion of UDP and TCP checksum on outgoing UD IPoIB
         * messages and can verify the validity of checksum for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM            = (1<<18),
        IB_DEVICE_UD_TSO                = (1<<19),
        IB_DEVICE_XRC                   = (1<<20),
        IB_DEVICE_MEM_MGT_EXTENSIONS    = (1<<21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        int                     device_cap_flags;
        int                     max_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        enum ib_atomic_cap      masked_atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_fmr;
        int                     max_map_per_fmr;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        unsigned int            max_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
};

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}

enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM                              = 1 <<  1,
        IB_PORT_NOTICE_SUP                      = 1 <<  2,
        IB_PORT_TRAP_SUP                        = 1 <<  3,
        IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
        IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
        IB_PORT_SL_MAP_SUP                      = 1 <<  6,
        IB_PORT_MKEY_NVRAM                      = 1 <<  7,
        IB_PORT_PKEY_NVRAM                      = 1 <<  8,
        IB_PORT_LED_INFO_SUP                    = 1 <<  9,
        IB_PORT_SM_DISABLED                     = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
        IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
        IB_PORT_CM_SUP                          = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
        IB_PORT_REINIT_SUP                      = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
        IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
        IB_PORT_CLIENT_REG_SUP                  = 1 << 25
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:          return -1;
        }
}

enum ib_port_speed {
        IB_SPEED_SDR    = 1,
        IB_SPEED_DDR    = 2,
        IB_SPEED_QDR    = 4,
        IB_SPEED_FDR10  = 8,
        IB_SPEED_FDR    = 16,
        IB_SPEED_EDR    = 32
};

struct ib_protocol_stats {
        /* TBD... */
};

struct iw_protocol_stats {
        u64     ipInReceives;
        u64     ipInHdrErrors;
        u64     ipInTooBigErrors;
        u64     ipInNoRoutes;
        u64     ipInAddrErrors;
        u64     ipInUnknownProtos;
        u64     ipInTruncatedPkts;
        u64     ipInDiscards;
        u64     ipInDelivers;
        u64     ipOutForwDatagrams;
        u64     ipOutRequests;
        u64     ipOutDiscards;
        u64     ipOutNoRoutes;
        u64     ipReasmTimeout;
        u64     ipReasmReqds;
        u64     ipReasmOKs;
        u64     ipReasmFails;
        u64     ipFragOKs;
        u64     ipFragFails;
        u64     ipFragCreates;
        u64     ipInMcastPkts;
        u64     ipOutMcastPkts;
        u64     ipInBcastPkts;
        u64     ipOutBcastPkts;

        u64     tcpRtoAlgorithm;
        u64     tcpRtoMin;
        u64     tcpRtoMax;
        u64     tcpMaxConn;
        u64     tcpActiveOpens;
        u64     tcpPassiveOpens;
        u64     tcpAttemptFails;
        u64     tcpEstabResets;
        u64     tcpCurrEstab;
        u64     tcpInSegs;
        u64     tcpOutSegs;
        u64     tcpRetransSegs;
        u64     tcpInErrs;
        u64     tcpOutRsts;
};

union rdma_protocol_stats {
        struct ib_protocol_stats        ib;
        struct iw_protocol_stats        iw;
};

struct ib_port_attr {
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        int                     gid_tbl_len;
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u16                     lid;
        u16                     sm_lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[64];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN                = 1,
        IB_PORT_INIT_TYPE               = (1<<2),
        IB_PORT_RESET_QKEY_CNTR         = (1<<3)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
};

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void            (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
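
/*
 * Example (illustrative sketch, not part of the original header): a
 * consumer can initialize a handler with the macro above and register
 * it with ib_register_event_handler(), declared further down in this
 * file.  The names my_handler and my_event_handler are hypothetical.
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              if (event->event == IB_EVENT_PORT_ACTIVE)
 *                      pr_info("%s: port %d is active\n",
 *                              event->device->name,
 *                              event->element.port_num);
 *      }
 *
 *      static struct ib_event_handler my_handler;
 *
 *      INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *      ib_register_event_handler(&my_handler);
 */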

struct ib_global_route {
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10,
        IB_RATE_14_GBPS  = 11,
        IB_RATE_56_GBPS  = 12,
        IB_RATE_112_GBPS = 13,
        IB_RATE_168_GBPS = 14,
        IB_RATE_25_GBPS  = 15,
        IB_RATE_100_GBPS = 16,
        IB_RATE_200_GBPS = 17,
        IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
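
/*
 * Example (illustrative): the three conversion helpers above are
 * mutually consistent.  For IB_RATE_5_GBPS:
 *
 *      int mult = ib_rate_to_mult(IB_RATE_5_GBPS);     yields 2
 *      int mbps = ib_rate_to_mbps(IB_RATE_5_GBPS);     yields 5000
 *      enum ib_rate rate = mult_to_ib_rate(2);         yields IB_RATE_5_GBPS
 */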

struct ib_ah_attr {
        struct ib_global_route  grh;
        u16                     dlid;
        u8                      sl;
        u8                      src_path_bits;
        u8                      static_rate;
        u8                      ah_flags;
        u8                      port_num;
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_FAST_REG_MR,
        IB_WC_MASKED_COMP_SWAP,
        IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
        IB_WC_RECV                      = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};
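
/*
 * Example (illustrative sketch): because IB_WC_RECV is a dedicated bit,
 * a completion handler can separate receive completions (IB_WC_RECV and
 * IB_WC_RECV_RDMA_WITH_IMM) from send-side completions with one mask
 * test.  The function name my_handle_wc is hypothetical.
 *
 *      static void my_handle_wc(struct ib_wc *wc)
 *      {
 *              if (wc->opcode & IB_WC_RECV)
 *                      pr_debug("recv completion, %u bytes\n", wc->byte_len);
 *              else
 *                      pr_debug("send-side completion\n");
 *      }
 */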

enum ib_wc_flags {
        IB_WC_GRH               = 1,
        IB_WC_WITH_IMM          = (1<<1),
        IB_WC_WITH_INVALIDATE   = (1<<2),
        IB_WC_IP_CSUM_OK        = (1<<3),
};

struct ib_wc {
        u64                     wr_id;
        enum ib_wc_status       status;
        enum ib_wc_opcode       opcode;
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        u32                     src_qp;
        int                     wc_flags;
        u16                     pkey_index;
        u16                     slid;
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED                 = 1 << 0,
        IB_CQ_NEXT_COMP                 = 1 << 1,
        IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
};
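
/*
 * Example (illustrative sketch): the usual race-free CQ re-arm loop.
 * ib_poll_cq() and ib_req_notify_cq() are declared further down in this
 * header; cq is the consumer's CQ and my_handle_wc() is hypothetical.
 * Re-polling whenever IB_CQ_REPORT_MISSED_EVENTS reports a missed
 * completion avoids losing events that arrive between the last poll
 * and the re-arm.
 *
 *      struct ib_wc wc;
 *
 *      do {
 *              while (ib_poll_cq(cq, 1, &wc) > 0)
 *                      my_handle_wc(&wc);
 *      } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *                                IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */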

enum ib_srq_type {
        IB_SRQT_BASIC,
        IB_SRQT_XRC
};

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR   = 1 << 0,
        IB_SRQ_LIMIT    = 1 << 1,
};

struct ib_srq_attr {
        u32     max_wr;
        u32     max_sge;
        u32     srq_limit;
};

struct ib_srq_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        struct ib_srq_attr      attr;
        enum ib_srq_type        srq_type;

        union {
                struct {
                        struct ib_xrcd *xrcd;
                        struct ib_cq   *cq;
                } xrc;
        } ext;
};

struct ib_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETHERTYPE,
        IB_QPT_RAW_PACKET = 8,
        IB_QPT_XRC_INI = 9,
        IB_QPT_XRC_TGT,
        IB_QPT_MAX
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
        /* reserve bits 26-31 for low level drivers' internal use */
        IB_QP_CREATE_RESERVED_START             = 1 << 26,
        IB_QP_CREATE_RESERVED_END               = 1 << 31,
};

struct ib_qp_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        enum ib_qp_create_flags create_flags;
        u8                      port_num; /* special QP types only */
};

struct ib_qp_open_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 =  0,
        IB_RNR_TIMER_000_01 =  1,
        IB_RNR_TIMER_000_02 =  2,
        IB_RNR_TIMER_000_03 =  3,
        IB_RNR_TIMER_000_04 =  4,
        IB_RNR_TIMER_000_06 =  5,
        IB_RNR_TIMER_000_08 =  6,
        IB_RNR_TIMER_000_12 =  7,
        IB_RNR_TIMER_000_16 =  8,
        IB_RNR_TIMER_000_24 =  9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE                     = 1,
        IB_QP_CUR_STATE                 = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
        IB_QP_ACCESS_FLAGS              = (1<<3),
        IB_QP_PKEY_INDEX                = (1<<4),
        IB_QP_PORT                      = (1<<5),
        IB_QP_QKEY                      = (1<<6),
        IB_QP_AV                        = (1<<7),
        IB_QP_PATH_MTU                  = (1<<8),
        IB_QP_TIMEOUT                   = (1<<9),
        IB_QP_RETRY_CNT                 = (1<<10),
        IB_QP_RNR_RETRY                 = (1<<11),
        IB_QP_RQ_PSN                    = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
        IB_QP_ALT_PATH                  = (1<<14),
        IB_QP_MIN_RNR_TIMER             = (1<<15),
        IB_QP_SQ_PSN                    = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
        IB_QP_PATH_MIG_STATE            = (1<<18),
        IB_QP_CAP                       = (1<<19),
        IB_QP_DEST_QPN                  = (1<<20)
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

struct ib_qp_attr {
        enum ib_qp_state        qp_state;
        enum ib_qp_state        cur_qp_state;
        enum ib_mtu             path_mtu;
        enum ib_mig_state       path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        int                     qp_access_flags;
        struct ib_qp_cap        cap;
        struct ib_ah_attr       ah_attr;
        struct ib_ah_attr       alt_ah_attr;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
};

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_FAST_REG_MR,
        IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
        IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
};

enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
        IB_SEND_INLINE          = (1<<3),
        IB_SEND_IP_CSUM         = (1<<4)
};

struct ib_sge {
        u64     addr;
        u32     length;
        u32     lkey;
};

struct ib_fast_reg_page_list {
        struct ib_device       *device;
        u64                    *page_list;
        unsigned int            max_page_list_len;
};

struct ib_send_wr {
        struct ib_send_wr      *next;
        u64                     wr_id;
        struct ib_sge          *sg_list;
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        union {
                struct {
                        u64     remote_addr;
                        u32     rkey;
                } rdma;
                struct {
                        u64     remote_addr;
                        u64     compare_add;
                        u64     swap;
                        u64     compare_add_mask;
                        u64     swap_mask;
                        u32     rkey;
                } atomic;
                struct {
                        struct ib_ah *ah;
                        void   *header;
                        int     hlen;
                        int     mss;
                        u32     remote_qpn;
                        u32     remote_qkey;
                        u16     pkey_index; /* valid for GSI only */
                        u8      port_num;   /* valid for DR SMPs on switch only */
                } ud;
                struct {
                        u64                             iova_start;
                        struct ib_fast_reg_page_list   *page_list;
                        unsigned int                    page_shift;
                        unsigned int                    page_list_len;
                        u32                             length;
                        int                             access_flags;
                        u32                             rkey;
                } fast_reg;
        } wr;
        u32                     xrc_remote_srq_num;     /* XRC TGT QPs only */
};

struct ib_recv_wr {
        struct ib_recv_wr      *next;
        u64                     wr_id;
        struct ib_sge          *sg_list;
        int                     num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE   = 1,
        IB_ACCESS_REMOTE_WRITE  = (1<<1),
        IB_ACCESS_REMOTE_READ   = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND       = (1<<4)
};

struct ib_phys_buf {
        u64      addr;
        u64      size;
};

struct ib_mr_attr {
        struct ib_pd    *pd;
        u64             device_virt_addr;
        u64             size;
        int             mr_access_flags;
        u32             lkey;
        u32             rkey;
};

enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS       = 1,
        IB_MR_REREG_PD          = (1<<1),
        IB_MR_REREG_ACCESS      = (1<<2)
};

struct ib_mw_bind {
        struct ib_mr   *mr;
        u64             wr_id;
        u64             addr;
        u32             length;
        int             send_flags;
        int             mw_access_flags;
};

struct ib_fmr_attr {
        int     max_pages;
        int     max_maps;
        u8      page_shift;
};

struct ib_ucontext {
        struct ib_device       *device;
        struct list_head        pd_list;
        struct list_head        mr_list;
        struct list_head        mw_list;
        struct list_head        cq_list;
        struct list_head        qp_list;
        struct list_head        srq_list;
        struct list_head        ah_list;
        struct list_head        xrcd_list;
        int                     closing;
};

struct ib_uobject {
        u64                     user_handle;    /* handle given to us by userspace */
        struct ib_ucontext     *context;        /* associated user context */
        void                   *object;         /* containing object */
        struct list_head        list;           /* link to context's list */
        int                     id;             /* index into kernel idr */
        struct kref             ref;
        struct rw_semaphore     mutex;          /* protects .live */
        int                     live;
};

struct ib_udata {
        void __user *inbuf;
        void __user *outbuf;
        size_t       inlen;
        size_t       outlen;
};

struct ib_pd {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        atomic_t                usecnt; /* count all resources */
};

struct ib_xrcd {
        struct ib_device       *device;
        atomic_t                usecnt; /* count all exposed resources */
        struct inode           *inode;

        struct mutex            tgt_qp_mutex;
        struct list_head        tgt_qp_list;
};

struct ib_ah {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct ib_uobject       *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        ib_comp_handler         comp_handler;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *cq_context;
        int                     cqe;
        atomic_t                usecnt; /* count number of work queues */
};

struct ib_srq {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        enum ib_srq_type        srq_type;
        atomic_t                usecnt;

        union {
                struct {
                        struct ib_xrcd *xrcd;
                        struct ib_cq   *cq;
                        u32             srq_num;
                } xrc;
        } ext;
};

struct ib_qp {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
        struct list_head        xrcd_list;
        atomic_t                usecnt; /* count times opened, mcast attaches */
        struct list_head        open_list;
        struct ib_qp           *real_qp;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

struct ib_mr {
        struct ib_device  *device;
        struct ib_pd      *pd;
        struct ib_uobject *uobject;
        u32                lkey;
        u32                rkey;
        atomic_t           usecnt; /* count number of MWs */
};

struct ib_mw {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct ib_uobject       *uobject;
        u32                     rkey;
};

struct ib_fmr {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct list_head        list;
        u32                     lkey;
        u32                     rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
        IB_MAD_IGNORE_MKEY      = 1,
        IB_MAD_IGNORE_BKEY      = 2,
        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};
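
/*
 * Example (illustrative): a driver's process_mad method that consumed
 * the incoming MAD and generated a response in out_mad would typically
 * end with:
 *
 *      return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 */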

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
        rwlock_t                lock;
        struct ib_event_handler event_handler;
        struct ib_pkey_cache  **pkey_cache;
        struct ib_gid_cache   **gid_cache;
        u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
        int             (*mapping_error)(struct ib_device *dev,
                                         u64 dma_addr);
        u64             (*map_single)(struct ib_device *dev,
                                      void *ptr, size_t size,
                                      enum dma_data_direction direction);
        void            (*unmap_single)(struct ib_device *dev,
                                        u64 addr, size_t size,
                                        enum dma_data_direction direction);
        u64             (*map_page)(struct ib_device *dev,
                                    struct page *page, unsigned long offset,
                                    size_t size,
                                    enum dma_data_direction direction);
        void            (*unmap_page)(struct ib_device *dev,
                                      u64 addr, size_t size,
                                      enum dma_data_direction direction);
        int             (*map_sg)(struct ib_device *dev,
                                  struct scatterlist *sg, int nents,
                                  enum dma_data_direction direction);
        void            (*unmap_sg)(struct ib_device *dev,
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction);
        u64             (*dma_address)(struct ib_device *dev,
                                       struct scatterlist *sg);
        unsigned int    (*dma_len)(struct ib_device *dev,
                                   struct scatterlist *sg);
        void            (*sync_single_for_cpu)(struct ib_device *dev,
                                               u64 dma_handle,
                                               size_t size,
                                               enum dma_data_direction dir);
        void            (*sync_single_for_device)(struct ib_device *dev,
                                                  u64 dma_handle,
                                                  size_t size,
                                                  enum dma_data_direction dir);
        void            *(*alloc_coherent)(struct ib_device *dev,
                                           size_t size,
                                           u64 *dma_handle,
                                           gfp_t flag);
        void            (*free_coherent)(struct ib_device *dev,
                                         size_t size, void *cpu_addr,
                                         u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
        struct device                *dma_device;

        char                          name[IB_DEVICE_NAME_MAX];

        struct list_head              event_handler_list;
        spinlock_t                    event_handler_lock;

        spinlock_t                    client_data_lock;
        struct list_head              core_list;
        struct list_head              client_data_list;

        struct ib_cache               cache;
        int                          *pkey_tbl_len;
        int                          *gid_tbl_len;

        int                           num_comp_vectors;

        struct iw_cm_verbs           *iwcm;

        int                        (*get_protocol_stats)(struct ib_device *device,
                                                         union rdma_protocol_stats *stats);
        int                        (*query_device)(struct ib_device *device,
                                                   struct ib_device_attr *device_attr);
        int                        (*query_port)(struct ib_device *device,
                                                 u8 port_num,
                                                 struct ib_port_attr *port_attr);
        enum rdma_link_layer       (*get_link_layer)(struct ib_device *device,
                                                     u8 port_num);
        int                        (*query_gid)(struct ib_device *device,
                                                u8 port_num, int index,
                                                union ib_gid *gid);
        int                        (*query_pkey)(struct ib_device *device,
                                                 u8 port_num, u16 index, u16 *pkey);
        int                        (*modify_device)(struct ib_device *device,
                                                    int device_modify_mask,
                                                    struct ib_device_modify *device_modify);
        int                        (*modify_port)(struct ib_device *device,
                                                  u8 port_num, int port_modify_mask,
                                                  struct ib_port_modify *port_modify);
        struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
                                                     struct ib_udata *udata);
        int                        (*dealloc_ucontext)(struct ib_ucontext *context);
        int                        (*mmap)(struct ib_ucontext *context,
                                           struct vm_area_struct *vma);
        struct ib_pd *             (*alloc_pd)(struct ib_device *device,
                                               struct ib_ucontext *context,
                                               struct ib_udata *udata);
        int                        (*dealloc_pd)(struct ib_pd *pd);
        struct ib_ah *             (*create_ah)(struct ib_pd *pd,
                                                struct ib_ah_attr *ah_attr);
        int                        (*modify_ah)(struct ib_ah *ah,
                                                struct ib_ah_attr *ah_attr);
        int                        (*query_ah)(struct ib_ah *ah,
                                               struct ib_ah_attr *ah_attr);
        int                        (*destroy_ah)(struct ib_ah *ah);
        struct ib_srq *            (*create_srq)(struct ib_pd *pd,
                                                 struct ib_srq_init_attr *srq_init_attr,
                                                 struct ib_udata *udata);
        int                        (*modify_srq)(struct ib_srq *srq,
                                                 struct ib_srq_attr *srq_attr,
                                                 enum ib_srq_attr_mask srq_attr_mask,
                                                 struct ib_udata *udata);
        int                        (*query_srq)(struct ib_srq *srq,
                                                struct ib_srq_attr *srq_attr);
        int                        (*destroy_srq)(struct ib_srq *srq);
        int                        (*post_srq_recv)(struct ib_srq *srq,
                                                    struct ib_recv_wr *recv_wr,
                                                    struct ib_recv_wr **bad_recv_wr);
        struct ib_qp *             (*create_qp)(struct ib_pd *pd,
                                                struct ib_qp_init_attr *qp_init_attr,
                                                struct ib_udata *udata);
        int                        (*modify_qp)(struct ib_qp *qp,
                                                struct ib_qp_attr *qp_attr,
                                                int qp_attr_mask,
                                                struct ib_udata *udata);
        int                        (*query_qp)(struct ib_qp *qp,
                                               struct ib_qp_attr *qp_attr,
                                               int qp_attr_mask,
                                               struct ib_qp_init_attr *qp_init_attr);
        int                        (*destroy_qp)(struct ib_qp *qp);
        int                        (*post_send)(struct ib_qp *qp,
                                                struct ib_send_wr *send_wr,
                                                struct ib_send_wr **bad_send_wr);
        int                        (*post_recv)(struct ib_qp *qp,
                                                struct ib_recv_wr *recv_wr,
                                                struct ib_recv_wr **bad_recv_wr);
        struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
                                                int comp_vector,
                                                struct ib_ucontext *context,
                                                struct ib_udata *udata);
        int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
                                                u16 cq_period);
        int                        (*destroy_cq)(struct ib_cq *cq);
        int                        (*resize_cq)(struct ib_cq *cq, int cqe,
                                                struct ib_udata *udata);
        int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
                                              struct ib_wc *wc);
        int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
        int                        (*req_notify_cq)(struct ib_cq *cq,
                                                    enum ib_cq_notify_flags flags);
        int                        (*req_ncomp_notif)(struct ib_cq *cq,
                                                      int wc_cnt);
        struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
                                                 int mr_access_flags);
        struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
                                                  struct ib_phys_buf *phys_buf_array,
                                                  int num_phys_buf,
                                                  int mr_access_flags,
                                                  u64 *iova_start);
        struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
                                                  u64 start, u64 length,
                                                  u64 virt_addr,
                                                  int mr_access_flags,
                                                  struct ib_udata *udata);
        int                        (*query_mr)(struct ib_mr *mr,
                                               struct ib_mr_attr *mr_attr);
        int                        (*dereg_mr)(struct ib_mr *mr);
        struct ib_mr *             (*alloc_fast_reg_mr)(struct ib_pd *pd,
                                               int max_page_list_len);
        struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
                                                                   int page_list_len);
        void                       (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
        int                        (*rereg_phys_mr)(struct ib_mr *mr,
                                                    int mr_rereg_mask,
                                                    struct ib_pd *pd,
                                                    struct ib_phys_buf *phys_buf_array,
                                                    int num_phys_buf,
                                                    int mr_access_flags,
                                                    u64 *iova_start);
        struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
        int                        (*bind_mw)(struct ib_qp *qp,
                                              struct ib_mw *mw,
                                              struct ib_mw_bind *mw_bind);
        int                        (*dealloc_mw)(struct ib_mw *mw);
        struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
                                                int mr_access_flags,
                                                struct ib_fmr_attr *fmr_attr);
        int                        (*map_phys_fmr)(struct ib_fmr *fmr,
                                                   u64 *page_list, int list_len,
                                                   u64 iova);
        int                        (*unmap_fmr)(struct list_head *fmr_list);
        int                        (*dealloc_fmr)(struct ib_fmr *fmr);
        int                        (*attach_mcast)(struct ib_qp *qp,
                                                   union ib_gid *gid,
                                                   u16 lid);
        int                        (*detach_mcast)(struct ib_qp *qp,
                                                   union ib_gid *gid,
                                                   u16 lid);
        int                        (*process_mad)(struct ib_device *device,
                                                  int process_mad_flags,
                                                  u8 port_num,
                                                  struct ib_wc *in_wc,
                                                  struct ib_grh *in_grh,
                                                  struct ib_mad *in_mad,
                                                  struct ib_mad *out_mad);
        struct ib_xrcd *           (*alloc_xrcd)(struct ib_device *device,
                                                 struct ib_ucontext *ucontext,
                                                 struct ib_udata *udata);
        int                        (*dealloc_xrcd)(struct ib_xrcd *xrcd);

        struct ib_dma_mapping_ops   *dma_ops;

        struct module               *owner;
        struct device                dev;
        struct kobject               *ports_parent;
        struct list_head             port_list;

        enum {
                IB_DEV_UNINITIALIZED,
                IB_DEV_REGISTERED,
                IB_DEV_UNREGISTERED
        }                            reg_state;

        int                          uverbs_abi_ver;
        u64                          uverbs_cmd_mask;

        char                         node_desc[64];
        __be64                       node_guid;
        u32                          local_dma_lkey;
        u8                           node_type;
        u8                           phys_port_cnt;
};

struct ib_client {
        char  *name;
        void (*add)   (struct ib_device *);
        void (*remove)(struct ib_device *);

        struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
                         void *data);
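
/*
 * Example (illustrative sketch): a typical upper-layer protocol
 * registers an ib_client once; the core then calls add() for every
 * existing and future device.  All names other than the ib_* symbols
 * are hypothetical.
 *
 *      static struct ib_client my_client;
 *
 *      static void my_add_one(struct ib_device *device)
 *      {
 *              struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *              if (st)
 *                      ib_set_client_data(device, &my_client, st);
 *      }
 *
 *      static void my_remove_one(struct ib_device *device)
 *      {
 *              kfree(ib_get_client_data(device, &my_client));
 *      }
 *
 *      static struct ib_client my_client = {
 *              .name   = "my_client",
 *              .add    = my_add_one,
 *              .remove = my_remove_one,
 *      };
 *
 *      ib_register_client(&my_client);
 */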

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask);
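
/*
 * Example (illustrative sketch): a driver's modify_qp method typically
 * begins with this check; my_query_state() and my_apply_attrs() stand
 * in for driver internals and are hypothetical.
 *
 *      static int my_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 *                              int attr_mask, struct ib_udata *udata)
 *      {
 *              enum ib_qp_state cur = (attr_mask & IB_QP_CUR_STATE) ?
 *                      attr->cur_qp_state : my_query_state(qp);
 *              enum ib_qp_state next = (attr_mask & IB_QP_STATE) ?
 *                      attr->qp_state : cur;
 *
 *              if (!ib_modify_qp_is_ok(cur, next, qp->qp_type, attr_mask))
 *                      return -EINVAL;
 *              return my_apply_attrs(qp, attr, attr_mask);
 *      }
 */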

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
                  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
                                               u8 port_num);

int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);
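
/*
 * Example (illustrative): like other verbs allocation calls,
 * ib_alloc_pd() returns an ERR_PTR()-encoded error rather than NULL
 * on failure:
 *
 *      struct ib_pd *pd = ib_alloc_pd(device);
 *
 *      if (IS_ERR(pd))
 *              return PTR_ERR(pd);
 *
 * and, once every resource that uses the PD has been destroyed:
 *
 *      ib_dealloc_pd(pd);
 */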

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
                       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num);
1396
1397/**
1398 * ib_modify_ah - Modifies the address vector associated with an address
1399 *   handle.
1400 * @ah: The address handle to modify.
1401 * @ah_attr: The new address vector attributes to associate with the
1402 *   address handle.
1403 */
1404int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1405
1406/**
1407 * ib_query_ah - Queries the address vector associated with an address
1408 *   handle.
1409 * @ah: The address handle to query.
1410 * @ah_attr: The address vector attributes associated with the address
1411 *   handle.
1412 */
1413int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1414
1415/**
1416 * ib_destroy_ah - Destroys an address handle.
1417 * @ah: The address handle to destroy.
1418 */
1419int ib_destroy_ah(struct ib_ah *ah);
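
/*
 * Editorial example (illustrative sketch): replying to a received UD
 * message.  wc and grh come from the receive completion; the AH is
 * referenced by UD send work requests and destroyed once no longer
 * needed:
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	(... post sends whose wr.ud.ah == ah ...)
 *	ib_destroy_ah(ah);
 */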
1420
1421/**
1422 * ib_create_srq - Creates a SRQ associated with the specified protection
1423 *   domain.
1424 * @pd: The protection domain associated with the SRQ.
1425 * @srq_init_attr: A list of initial attributes required to create the
1426 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
1427 *   the actual capabilities of the created SRQ.
1428 *
1429 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1430 * requested size of the SRQ, and set to the actual values allocated
1431 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1432 * will always be at least as large as the requested values.
1433 */
1434struct ib_srq *ib_create_srq(struct ib_pd *pd,
1435                             struct ib_srq_init_attr *srq_init_attr);
1436
1437/**
1438 * ib_modify_srq - Modifies the attributes for the specified SRQ.
1439 * @srq: The SRQ to modify.
1440 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
1441 *   the current values of selected SRQ attributes are returned.
1442 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1443 *   are being modified.
1444 *
1445 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1446 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1447 * the number of receives queued drops below the limit.
1448 */
1449int ib_modify_srq(struct ib_srq *srq,
1450                  struct ib_srq_attr *srq_attr,
1451                  enum ib_srq_attr_mask srq_attr_mask);
1452
1453/**
1454 * ib_query_srq - Returns the attribute list and current values for the
1455 *   specified SRQ.
1456 * @srq: The SRQ to query.
1457 * @srq_attr: The attributes of the specified SRQ.
1458 */
1459int ib_query_srq(struct ib_srq *srq,
1460                 struct ib_srq_attr *srq_attr);
1461
1462/**
1463 * ib_destroy_srq - Destroys the specified SRQ.
1464 * @srq: The SRQ to destroy.
1465 */
1466int ib_destroy_srq(struct ib_srq *srq);
1467
1468/**
1469 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1470 * @srq: The SRQ to post the work request on.
1471 * @recv_wr: A list of work requests to post on the receive queue.
1472 * @bad_recv_wr: On an immediate failure, this parameter will reference
1473 *   the work request that failed to be posted on the SRQ.
1474 */
1475static inline int ib_post_srq_recv(struct ib_srq *srq,
1476                                   struct ib_recv_wr *recv_wr,
1477                                   struct ib_recv_wr **bad_recv_wr)
1478{
1479        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1480}
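
/*
 * Editorial example (illustrative sketch): posting one receive buffer
 * to an SRQ.  dma_addr, len and lkey are assumed to describe a buffer
 * already mapped with the ib_dma_*() helpers below:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = (u64) (unsigned long) ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *	int ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */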
1481
1482/**
1483 * ib_create_qp - Creates a QP associated with the specified protection
1484 *   domain.
1485 * @pd: The protection domain associated with the QP.
1486 * @qp_init_attr: A list of initial attributes required to create the
1487 *   QP.  If QP creation succeeds, then the attributes are updated to
1488 *   the actual capabilities of the created QP.
1489 */
1490struct ib_qp *ib_create_qp(struct ib_pd *pd,
1491                           struct ib_qp_init_attr *qp_init_attr);
1492
1493/**
1494 * ib_modify_qp - Modifies the attributes for the specified QP and then
1495 *   transitions the QP to the given state.
1496 * @qp: The QP to modify.
1497 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
1498 *   the current values of selected QP attributes are returned.
1499 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1500 *   are being modified.
1501 */
1502int ib_modify_qp(struct ib_qp *qp,
1503                 struct ib_qp_attr *qp_attr,
1504                 int qp_attr_mask);
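
/*
 * Editorial example (illustrative sketch): driving a new RC QP from
 * RESET to INIT.  The attribute mask names exactly the fields being
 * supplied, per ib_modify_qp_is_ok() above:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = port_num,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */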
1505
1506/**
1507 * ib_query_qp - Returns the attribute list and current values for the
1508 *   specified QP.
1509 * @qp: The QP to query.
1510 * @qp_attr: The attributes of the specified QP.
1511 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1512 * @qp_init_attr: Additional attributes of the selected QP.
1513 *
1514 * The qp_attr_mask may be used to limit the query to gathering only the
1515 * selected attributes.
1516 */
1517int ib_query_qp(struct ib_qp *qp,
1518                struct ib_qp_attr *qp_attr,
1519                int qp_attr_mask,
1520                struct ib_qp_init_attr *qp_init_attr);
1521
1522/**
1523 * ib_destroy_qp - Destroys the specified QP.
1524 * @qp: The QP to destroy.
1525 */
1526int ib_destroy_qp(struct ib_qp *qp);
1527
1528/**
1529 * ib_open_qp - Obtain a reference to an existing sharable QP.
1530 * @xrcd: XRC domain
1531 * @qp_open_attr: Attributes identifying the QP to open.
1532 *
1533 * Returns a reference to a sharable QP.
1534 */
1535struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1536                         struct ib_qp_open_attr *qp_open_attr);
1537
1538/**
1539 * ib_close_qp - Release an external reference to a QP.
1540 * @qp: The QP handle to release
1541 *
1542 * The opened QP handle is released by the caller.  The underlying
1543 * shared QP is not destroyed until all internal references are released.
1544 */
1545int ib_close_qp(struct ib_qp *qp);
1546
1547/**
1548 * ib_post_send - Posts a list of work requests to the send queue of
1549 *   the specified QP.
1550 * @qp: The QP to post the work request on.
1551 * @send_wr: A list of work requests to post on the send queue.
1552 * @bad_send_wr: On an immediate failure, this parameter will reference
1553 *   the work request that failed to be posted on the QP.
1554 *
1555 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
1556 * error is returned, the QP state shall not be affected,
1557 * ib_post_send() will return an immediate error after queueing any
1558 * earlier work requests in the list.
1559 */
1560static inline int ib_post_send(struct ib_qp *qp,
1561                               struct ib_send_wr *send_wr,
1562                               struct ib_send_wr **bad_send_wr)
1563{
1564        return qp->device->post_send(qp, send_wr, bad_send_wr);
1565}
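
/*
 * Editorial example (illustrative sketch): posting a single signaled
 * SEND of one already-mapped buffer:
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_send_wr wr = {
 *		.wr_id      = (u64) (unsigned long) ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */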
1566
1567/**
1568 * ib_post_recv - Posts a list of work requests to the receive queue of
1569 *   the specified QP.
1570 * @qp: The QP to post the work request on.
1571 * @recv_wr: A list of work requests to post on the receive queue.
1572 * @bad_recv_wr: On an immediate failure, this parameter will reference
1573 *   the work request that failed to be posted on the QP.
1574 */
1575static inline int ib_post_recv(struct ib_qp *qp,
1576                               struct ib_recv_wr *recv_wr,
1577                               struct ib_recv_wr **bad_recv_wr)
1578{
1579        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1580}
1581
1582/**
1583 * ib_create_cq - Creates a CQ on the specified device.
1584 * @device: The device on which to create the CQ.
1585 * @comp_handler: A user-specified callback that is invoked when a
1586 *   completion event occurs on the CQ.
1587 * @event_handler: A user-specified callback that is invoked when an
1588 *   asynchronous event not associated with a completion occurs on the CQ.
1589 * @cq_context: Context associated with the CQ returned to the user via
1590 *   the associated completion and event handlers.
1591 * @cqe: The minimum size of the CQ.
1592 * @comp_vector: Completion vector used to signal completion events.
1593 *   Must be >= 0 and < device->num_comp_vectors.
1594 *
1595 * Users can examine the cq structure to determine the actual CQ size.
1596 */
1597struct ib_cq *ib_create_cq(struct ib_device *device,
1598                           ib_comp_handler comp_handler,
1599                           void (*event_handler)(struct ib_event *, void *),
1600                           void *cq_context, int cqe, int comp_vector);
1601
1602/**
1603 * ib_resize_cq - Modifies the capacity of the CQ.
1604 * @cq: The CQ to resize.
1605 * @cqe: The minimum size of the CQ.
1606 *
1607 * Users can examine the cq structure to determine the actual CQ size.
1608 */
1609int ib_resize_cq(struct ib_cq *cq, int cqe);
1610
1611/**
1612 * ib_modify_cq - Modifies moderation params of the CQ
1613 * @cq: The CQ to modify.
1614 * @cq_count: number of CQEs that will trigger an event
1615 * @cq_period: max period of time in usec before triggering an event
1616 *
1617 */
1618int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1619
1620/**
1621 * ib_destroy_cq - Destroys the specified CQ.
1622 * @cq: The CQ to destroy.
1623 */
1624int ib_destroy_cq(struct ib_cq *cq);
1625
1626/**
1627 * ib_poll_cq - poll a CQ for completion(s)
1628 * @cq: the CQ being polled
1629 * @num_entries: maximum number of completions to return
1630 * @wc: array of at least @num_entries &struct ib_wc where completions
1631 *   will be returned
1632 *
1633 * Poll a CQ for (possibly multiple) completions.  If the return value
1634 * is < 0, an error occurred.  If the return value is >= 0, it is the
1635 * number of completions returned.  If the return value is
1636 * non-negative and < num_entries, then the CQ was emptied.
1637 */
1638static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1639                             struct ib_wc *wc)
1640{
1641        return cq->device->poll_cq(cq, num_entries, wc);
1642}
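
/*
 * Editorial example (illustrative sketch): draining a CQ one entry at
 * a time; handle_wc() is a hypothetical consumer routine that checks
 * wc.status != IB_WC_SUCCESS itself:
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle_wc(&wc);
 */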
1643
1644/**
1645 * ib_peek_cq - Returns the number of unreaped completions currently
1646 *   on the specified CQ.
1647 * @cq: The CQ to peek.
1648 * @wc_cnt: A minimum number of unreaped completions to check for.
1649 *
1650 * If the number of unreaped completions is greater than or equal to wc_cnt,
1651 * this function returns wc_cnt, otherwise, it returns the actual number of
1652 * unreaped completions.
1653 */
1654int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1655
1656/**
1657 * ib_req_notify_cq - Request completion notification on a CQ.
1658 * @cq: The CQ to generate an event for.
1659 * @flags:
1660 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1661 *   to request an event on the next solicited event or next work
1662 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1663 *   may also be |ed in to request a hint about missed events, as
1664 *   described below.
1665 *
1666 * Return Value:
1667 *    < 0 means an error occurred while requesting notification
1668 *   == 0 means notification was requested successfully, and if
1669 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1670 *        were missed and it is safe to wait for another event.  In
1671 *        this case it is guaranteed that any work completions added
1672 *        to the CQ since the last CQ poll will trigger a completion
1673 *        notification event.
1674 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1675 *        in.  It means that the consumer must poll the CQ again to
1676 *        make sure it is empty to avoid missing an event because of a
1677 *        race between requesting notification and an entry being
1678 *        added to the CQ.  This return value means it is possible
1679 *        (but not guaranteed) that a work completion has been added
1680 *        to the CQ since the last poll without triggering a
1681 *        completion notification event.
1682 */
1683static inline int ib_req_notify_cq(struct ib_cq *cq,
1684                                   enum ib_cq_notify_flags flags)
1685{
1686        return cq->device->req_notify_cq(cq, flags);
1687}
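
/*
 * Editorial example (illustrative sketch): the re-arm loop implied by
 * the return values above.  Re-polling whenever the request reports a
 * possibly-missed event closes the race between arming the CQ and new
 * entries arriving; handle_wc() is hypothetical:
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */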
1688
1689/**
1690 * ib_req_ncomp_notif - Request completion notification when there are
1691 *   at least the specified number of unreaped completions on the CQ.
1692 * @cq: The CQ to generate an event for.
1693 * @wc_cnt: The number of unreaped completions that should be on the
1694 *   CQ before an event is generated.
1695 */
1696static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1697{
1698        return cq->device->req_ncomp_notif ?
1699                cq->device->req_ncomp_notif(cq, wc_cnt) :
1700                -ENOSYS;
1701}
1702
1703/**
1704 * ib_get_dma_mr - Returns a memory region for system memory that is
1705 *   usable for DMA.
1706 * @pd: The protection domain associated with the memory region.
1707 * @mr_access_flags: Specifies the memory access rights.
1708 *
1709 * Note that the ib_dma_*() functions defined below must be used
1710 * to create/destroy addresses used with the Lkey or Rkey returned
1711 * by ib_get_dma_mr().
1712 */
1713struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1714
1715/**
1716 * ib_dma_mapping_error - check a DMA addr for error
1717 * @dev: The device for which the dma_addr was created
1718 * @dma_addr: The DMA address to check
1719 */
1720static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1721{
1722        if (dev->dma_ops)
1723                return dev->dma_ops->mapping_error(dev, dma_addr);
1724        return dma_mapping_error(dev->dma_device, dma_addr);
1725}
1726
1727/**
1728 * ib_dma_map_single - Map a kernel virtual address to DMA address
1729 * @dev: The device for which the dma_addr is to be created
1730 * @cpu_addr: The kernel virtual address
1731 * @size: The size of the region in bytes
1732 * @direction: The direction of the DMA
1733 */
1734static inline u64 ib_dma_map_single(struct ib_device *dev,
1735                                    void *cpu_addr, size_t size,
1736                                    enum dma_data_direction direction)
1737{
1738        if (dev->dma_ops)
1739                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1740        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1741}
1742
1743/**
1744 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1745 * @dev: The device for which the DMA address was created
1746 * @addr: The DMA address
1747 * @size: The size of the region in bytes
1748 * @direction: The direction of the DMA
1749 */
1750static inline void ib_dma_unmap_single(struct ib_device *dev,
1751                                       u64 addr, size_t size,
1752                                       enum dma_data_direction direction)
1753{
1754        if (dev->dma_ops)
1755                dev->dma_ops->unmap_single(dev, addr, size, direction);
1756        else
1757                dma_unmap_single(dev->dma_device, addr, size, direction);
1758}
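
/*
 * Editorial example (illustrative sketch): the map/check/use/unmap
 * cycle.  Every mapping must be checked with ib_dma_mapping_error()
 * before the address is handed to the HCA:
 *
 *	u64 dma = ib_dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	(... post work requests referencing dma ...)
 *	ib_dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 */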
1759
1760static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
1761                                          void *cpu_addr, size_t size,
1762                                          enum dma_data_direction direction,
1763                                          struct dma_attrs *attrs)
1764{
1765        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
1766                                    direction, attrs);
1767}
1768
1769static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
1770                                             u64 addr, size_t size,
1771                                             enum dma_data_direction direction,
1772                                             struct dma_attrs *attrs)
1773{
1774        return dma_unmap_single_attrs(dev->dma_device, addr, size,
1775                                      direction, attrs);
1776}
1777
1778/**
1779 * ib_dma_map_page - Map a physical page to DMA address
1780 * @dev: The device for which the dma_addr is to be created
1781 * @page: The page to be mapped
1782 * @offset: The offset within the page
1783 * @size: The size of the region in bytes
1784 * @direction: The direction of the DMA
1785 */
1786static inline u64 ib_dma_map_page(struct ib_device *dev,
1787                                  struct page *page,
1788                                  unsigned long offset,
1789                                  size_t size,
1790                                  enum dma_data_direction direction)
1791{
1792        if (dev->dma_ops)
1793                return dev->dma_ops->map_page(dev, page, offset, size, direction);
1794        return dma_map_page(dev->dma_device, page, offset, size, direction);
1795}
1796
1797/**
1798 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1799 * @dev: The device for which the DMA address was created
1800 * @addr: The DMA address
1801 * @size: The size of the region in bytes
1802 * @direction: The direction of the DMA
1803 */
1804static inline void ib_dma_unmap_page(struct ib_device *dev,
1805                                     u64 addr, size_t size,
1806                                     enum dma_data_direction direction)
1807{
1808        if (dev->dma_ops)
1809                dev->dma_ops->unmap_page(dev, addr, size, direction);
1810        else
1811                dma_unmap_page(dev->dma_device, addr, size, direction);
1812}
1813
1814/**
1815 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1816 * @dev: The device for which the DMA addresses are to be created
1817 * @sg: The array of scatter/gather entries
1818 * @nents: The number of scatter/gather entries
1819 * @direction: The direction of the DMA
1820 */
1821static inline int ib_dma_map_sg(struct ib_device *dev,
1822                                struct scatterlist *sg, int nents,
1823                                enum dma_data_direction direction)
1824{
1825        if (dev->dma_ops)
1826                return dev->dma_ops->map_sg(dev, sg, nents, direction);
1827        return dma_map_sg(dev->dma_device, sg, nents, direction);
1828}
1829
1830/**
1831 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1832 * @dev: The device for which the DMA addresses were created
1833 * @sg: The array of scatter/gather entries
1834 * @nents: The number of scatter/gather entries
1835 * @direction: The direction of the DMA
1836 */
1837static inline void ib_dma_unmap_sg(struct ib_device *dev,
1838                                   struct scatterlist *sg, int nents,
1839                                   enum dma_data_direction direction)
1840{
1841        if (dev->dma_ops)
1842                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1843        else
1844                dma_unmap_sg(dev->dma_device, sg, nents, direction);
1845}
1846
1847static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
1848                                      struct scatterlist *sg, int nents,
1849                                      enum dma_data_direction direction,
1850                                      struct dma_attrs *attrs)
1851{
1852        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1853}
1854
1855static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1856                                         struct scatterlist *sg, int nents,
1857                                         enum dma_data_direction direction,
1858                                         struct dma_attrs *attrs)
1859{
1860        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1861}
1862/**
1863 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1864 * @dev: The device for which the DMA addresses were created
1865 * @sg: The scatter/gather entry
1866 */
1867static inline u64 ib_sg_dma_address(struct ib_device *dev,
1868                                    struct scatterlist *sg)
1869{
1870        if (dev->dma_ops)
1871                return dev->dma_ops->dma_address(dev, sg);
1872        return sg_dma_address(sg);
1873}
1874
1875/**
1876 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1877 * @dev: The device for which the DMA addresses were created
1878 * @sg: The scatter/gather entry
1879 */
1880static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1881                                         struct scatterlist *sg)
1882{
1883        if (dev->dma_ops)
1884                return dev->dma_ops->dma_len(dev, sg);
1885        return sg_dma_len(sg);
1886}
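
/*
 * Editorial example (illustrative sketch): mapping a scatterlist and
 * walking the mapped entries.  ib_dma_map_sg() returns the number of
 * mapped entries, which may be less than nents, or 0 on failure;
 * use() is hypothetical:
 *
 *	int i, n = ib_dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 *	if (!n)
 *		return -ENOMEM;
 *	for (i = 0; i < n; i++)
 *		use(ib_sg_dma_address(dev, &sglist[i]),
 *		    ib_sg_dma_len(dev, &sglist[i]));
 *	ib_dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */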
1887
1888/**
1889 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1890 * @dev: The device for which the DMA address was created
1891 * @addr: The DMA address
1892 * @size: The size of the region in bytes
1893 * @dir: The direction of the DMA
1894 */
1895static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1896                                              u64 addr,
1897                                              size_t size,
1898                                              enum dma_data_direction dir)
1899{
1900        if (dev->dma_ops)
1901                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1902        else
1903                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1904}
1905
1906/**
1907 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1908 * @dev: The device for which the DMA address was created
1909 * @addr: The DMA address
1910 * @size: The size of the region in bytes
1911 * @dir: The direction of the DMA
1912 */
1913static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1914                                                 u64 addr,
1915                                                 size_t size,
1916                                                 enum dma_data_direction dir)
1917{
1918        if (dev->dma_ops)
1919                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
1920        else
1921                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1922}
1923
1924/**
1925 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1926 * @dev: The device for which the DMA address is requested
1927 * @size: The size of the region to allocate in bytes
1928 * @dma_handle: A pointer for returning the DMA address of the region
1929 * @flag: memory allocator flags
1930 */
1931static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1932                                           size_t size,
1933                                           u64 *dma_handle,
1934                                           gfp_t flag)
1935{
1936        if (dev->dma_ops)
1937                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1938        else {
1939                dma_addr_t handle;
1940                void *ret;
1941
1942                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
1943                *dma_handle = handle;
1944                return ret;
1945        }
1946}
1947
1948/**
1949 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1950 * @dev: The device for which the DMA addresses were allocated
1951 * @size: The size of the region
1952 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1953 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1954 */
1955static inline void ib_dma_free_coherent(struct ib_device *dev,
1956                                        size_t size, void *cpu_addr,
1957                                        u64 dma_handle)
1958{
1959        if (dev->dma_ops)
1960                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
1961        else
1962                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1963}
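
/*
 * Editorial example (illustrative sketch): allocating a small
 * device-visible ring buffer.  Note the u64 handle in this API,
 * unlike the dma_addr_t used by plain dma_alloc_coherent():
 *
 *	u64 dma;
 *	void *ring = ib_dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	(... tell the HCA about dma, access ring from the CPU ...)
 *	ib_dma_free_coherent(dev, size, ring, dma);
 */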
1964
1965/**
1966 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1967 *   by an HCA.
1968 * @pd: The protection domain assigned to the registered region.
1969 * @phys_buf_array: Specifies a list of physical buffers to use in the
1970 *   memory region.
1971 * @num_phys_buf: Specifies the size of the phys_buf_array.
1972 * @mr_access_flags: Specifies the memory access rights.
1973 * @iova_start: The offset of the region's starting I/O virtual address.
1974 */
1975struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1976                             struct ib_phys_buf *phys_buf_array,
1977                             int num_phys_buf,
1978                             int mr_access_flags,
1979                             u64 *iova_start);
1980
1981/**
1982 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
1983 *   Conceptually, this call deregisters the memory region and then
1984 *   registers the new physical memory region in its place.  Where possible,
1985 *   resources are reused instead of deallocated and reallocated.
1986 * @mr: The memory region to modify.
1987 * @mr_rereg_mask: A bit-mask used to indicate which of the following
1988 *   properties of the memory region are being modified.
1989 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1990 *   the new protection domain to associate with the memory region,
1991 *   otherwise, this parameter is ignored.
1992 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1993 *   field specifies a list of physical buffers to use in the new
1994 *   translation, otherwise, this parameter is ignored.
1995 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1996 *   field specifies the size of the phys_buf_array, otherwise, this
1997 *   parameter is ignored.
1998 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1999 *   field specifies the new memory access rights, otherwise, this
2000 *   parameter is ignored.
2001 * @iova_start: The offset of the region's starting I/O virtual address.
2002 */
2003int ib_rereg_phys_mr(struct ib_mr *mr,
2004                     int mr_rereg_mask,
2005                     struct ib_pd *pd,
2006                     struct ib_phys_buf *phys_buf_array,
2007                     int num_phys_buf,
2008                     int mr_access_flags,
2009                     u64 *iova_start);
2010
2011/**
2012 * ib_query_mr - Retrieves information about a specific memory region.
2013 * @mr: The memory region to retrieve information about.
2014 * @mr_attr: The attributes of the specified memory region.
2015 */
2016int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2017
2018/**
2019 * ib_dereg_mr - Deregisters a memory region and removes it from the
2020 *   HCA translation table.
2021 * @mr: The memory region to deregister.
2022 */
2023int ib_dereg_mr(struct ib_mr *mr);
2024
2025/**
2026 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
2027 *   IB_WR_FAST_REG_MR send work request.
2028 * @pd: The protection domain associated with the region.
2029 * @max_page_list_len: requested max physical buffer list length to be
2030 *   used with fast register work requests for this MR.
2031 */
2032struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
2033
2034/**
2035 * ib_alloc_fast_reg_page_list - Allocates a page list array
2036 * @device: ib device pointer.
2037 * @page_list_len: size of the page list array to be allocated.
2038 *
2039 * This allocates and returns a struct ib_fast_reg_page_list * and a
2040 * page_list array that is at least page_list_len in size.  The actual
2041 * size is returned in max_page_list_len.  The caller is responsible
2042 * for initializing the contents of the page_list array before posting
2043 * a send work request with the IB_WR_FAST_REG_MR opcode.
2044 *
2045 * The page_list array entries must be translated using one of the
2046 * ib_dma_*() functions just like the addresses passed to
2047 * ib_map_phys_fmr().  Once ib_post_send() is issued, the struct
2048 * ib_fast_reg_page_list must not be modified by the caller until the
2049 * IB_WR_FAST_REG_MR work request completes.
2050 */
2051struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
2052                                struct ib_device *device, int page_list_len);
2053
2054/**
2055 * ib_free_fast_reg_page_list - Deallocates a previously allocated
2056 *   page list array.
2057 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
2058 */
2059void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
2060
2061/**
2062 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
2063 *   R_Key and L_Key.
2064 * @mr: struct ib_mr pointer to be updated.
2065 * @newkey: new key to be used.
2066 */
2067static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2068{
2069        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2070        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2071}
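
/*
 * Editorial example (illustrative sketch): bumping the key before
 * reusing a fast_reg MR, so stale remote references cannot match the
 * new registration.  The key-generation scheme is up to the caller;
 * fr_wr is the IB_WR_FAST_REG_MR send work request being built:
 *
 *	ib_update_fast_reg_key(mr, newkey);
 *	fr_wr.wr.fast_reg.rkey = mr->rkey;
 */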
2072
2073/**
2074 * ib_alloc_mw - Allocates a memory window.
2075 * @pd: The protection domain associated with the memory window.
2076 */
2077struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
2078
2079/**
2080 * ib_bind_mw - Posts a work request to the send queue of the specified
2081 *   QP, which binds the memory window to the given address range and
2082 *   remote access attributes.
2083 * @qp: QP to post the bind work request on.
2084 * @mw: The memory window to bind.
2085 * @mw_bind: Specifies information about the memory window, including
2086 *   its address range, remote access rights, and associated memory region.
2087 */
2088static inline int ib_bind_mw(struct ib_qp *qp,
2089                             struct ib_mw *mw,
2090                             struct ib_mw_bind *mw_bind)
2091{
2092        /* XXX reference counting in corresponding MR? */
2093        return mw->device->bind_mw ?
2094                mw->device->bind_mw(qp, mw, mw_bind) :
2095                -ENOSYS;
2096}
2097
2098/**
2099 * ib_dealloc_mw - Deallocates a memory window.
2100 * @mw: The memory window to deallocate.
2101 */
2102int ib_dealloc_mw(struct ib_mw *mw);
2103
2104/**
2105 * ib_alloc_fmr - Allocates an unmapped fast memory region.
2106 * @pd: The protection domain associated with the unmapped region.
2107 * @mr_access_flags: Specifies the memory access rights.
2108 * @fmr_attr: Attributes of the unmapped region.
2109 *
2110 * A fast memory region must be mapped before it can be used as part of
2111 * a work request.
2112 */
2113struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2114                            int mr_access_flags,
2115                            struct ib_fmr_attr *fmr_attr);
2116
2117/**
2118 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2119 * @fmr: The fast memory region to associate with the pages.
2120 * @page_list: An array of physical pages to map to the fast memory region.
2121 * @list_len: The number of pages in page_list.
2122 * @iova: The I/O virtual address to use with the mapped region.
2123 */
2124static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2125                                  u64 *page_list, int list_len,
2126                                  u64 iova)
2127{
2128        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2129}
2130
2131/**
2132 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2133 * @fmr_list: A linked list of fast memory regions to unmap.
2134 */
2135int ib_unmap_fmr(struct list_head *fmr_list);
2136
2137/**
2138 * ib_dealloc_fmr - Deallocates a fast memory region.
2139 * @fmr: The fast memory region to deallocate.
2140 */
2141int ib_dealloc_fmr(struct ib_fmr *fmr);
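
/*
 * Editorial example (illustrative sketch): the FMR lifecycle in
 * miniature.  page_list holds ib_dma_*()-translated page addresses;
 * the max_pages/max_maps/page_shift values are arbitrary here:
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	(... use the mapping, then unmap via a list holding fmr->list ...)
 *	ib_dealloc_fmr(fmr);
 */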
2142
2143/**
2144 * ib_attach_mcast - Attaches the specified QP to a multicast group.
2145 * @qp: QP to attach to the multicast group.  The QP must be type
2146 *   IB_QPT_UD.
2147 * @gid: Multicast group GID.
2148 * @lid: Multicast group LID in host byte order.
2149 *
2150 * In order to send and receive multicast packets, subnet
2151 * administration must have created the multicast group and configured
2152 * the fabric appropriately.  The port associated with the specified
2153 * QP must also be a member of the multicast group.
2154 */
2155int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2156
2157/**
2158 * ib_detach_mcast - Detaches the specified QP from a multicast group.
2159 * @qp: QP to detach from the multicast group.
2160 * @gid: Multicast group GID.
2161 * @lid: Multicast group LID in host byte order.
2162 */
2163int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2164
2165/**
2166 * ib_alloc_xrcd - Allocates an XRC domain.
2167 * @device: The device on which to allocate the XRC domain.
2168 */
2169struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
2170
2171/**
2172 * ib_dealloc_xrcd - Deallocates an XRC domain.
2173 * @xrcd: The XRC domain to deallocate.
2174 */
2175int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
2176
2177#endif /* IB_VERBS_H */
2178